@@ -48,6 +48,20 @@
 #define AMD_PSTATE_TRANSITION_LATENCY	0x20000
 #define AMD_PSTATE_TRANSITION_DELAY	500
 
+/*
+ * TODO: We need more time to fine tune the processors with the shared
+ * memory solution together with the community.
+ *
+ * There are some performance drops in CPU benchmarks reported by SUSE.
+ * We are working with them to fine tune the shared memory solution, so
+ * it is disabled by default (falling back to acpi-cpufreq on these
+ * processors) and a module parameter allows enabling it for debugging.
+ */
+static bool shared_mem = false;
+module_param(shared_mem, bool, 0444);
+MODULE_PARM_DESC(shared_mem,
+		 "enable amd-pstate on processors with shared memory solution (false = disabled (default), true = enabled)");
+
 static struct cpufreq_driver amd_pstate_driver;
 
 /**
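A note on the new knob, not part of the patch itself: shared_mem is registered with module_param(..., 0444), so it is read-only at runtime and must be set at load time. Assuming amd-pstate is built as a module, that means something like "modprobe amd_pstate shared_mem=1"; with the driver built in, the equivalent is amd_pstate.shared_mem=1 on the kernel command line, which is the spelling the pr_info() in the final hunk suggests.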
@@ -85,12 +99,32 @@ struct amd_cpudata {
 	u32	lowest_nonlinear_freq;
 };
 
-static inline int amd_pstate_enable(bool enable)
+static inline int pstate_enable(bool enable)
 {
 	return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
 }
 
-static int amd_pstate_init_perf(struct amd_cpudata *cpudata)
+static int cppc_enable(bool enable)
+{
+	int cpu, ret = 0;
+
+	for_each_present_cpu(cpu) {
+		ret = cppc_set_enable(cpu, enable);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
+
+static inline int amd_pstate_enable(bool enable)
+{
+	return static_call(amd_pstate_enable)(enable);
+}
+
+static int pstate_init_perf(struct amd_cpudata *cpudata)
 {
 	u64 cap1;
 
@@ -113,8 +147,33 @@ static int amd_pstate_init_perf(struct amd_cpudata *cpudata)
 	return 0;
 }
 
-static void amd_pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
-				   u32 des_perf, u32 max_perf, bool fast_switch)
+static int cppc_init_perf(struct amd_cpudata *cpudata)
+{
+	struct cppc_perf_caps cppc_perf;
+
+	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+	if (ret)
+		return ret;
+
+	WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());
+
+	WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
+	WRITE_ONCE(cpudata->lowest_nonlinear_perf,
+		   cppc_perf.lowest_nonlinear_perf);
+	WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+
+	return 0;
+}
+
+DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
+
+static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
+{
+	return static_call(amd_pstate_init_perf)(cpudata);
+}
+
+static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+			       u32 des_perf, u32 max_perf, bool fast_switch)
 {
 	if (fast_switch)
 		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
@@ -123,6 +182,29 @@ static void amd_pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
 			     READ_ONCE(cpudata->cppc_req_cached));
 }
 
+static void cppc_update_perf(struct amd_cpudata *cpudata,
+			     u32 min_perf, u32 des_perf,
+			     u32 max_perf, bool fast_switch)
+{
+	struct cppc_perf_ctrls perf_ctrls;
+
+	perf_ctrls.max_perf = max_perf;
+	perf_ctrls.min_perf = min_perf;
+	perf_ctrls.desired_perf = des_perf;
+
+	cppc_set_perf(cpudata->cpu, &perf_ctrls);
+}
+
+DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+
+static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
+					  u32 min_perf, u32 des_perf,
+					  u32 max_perf, bool fast_switch)
+{
+	static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+					    max_perf, fast_switch);
+}
+
 static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
 			      u32 des_perf, u32 max_perf, bool fast_switch)
 {
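The three DEFINE_STATIC_CALL/static_call_update pairs above implement a pick-once backend dispatch: a default MSR-based implementation is installed at definition time and retargeted at most once during driver init, after which every caller goes through a single dispatch point. Below is a minimal userspace sketch of the same pattern, using a plain function pointer where the kernel uses run-time code patching (so the real fast path avoids the indirect branch); all names in the sketch are illustrative, not from the patch.

/* Sketch only: static_call has the same shape as a function pointer,
 * but the kernel patches the call site itself for a cheaper fast path.
 */
#include <stdbool.h>
#include <stdio.h>

static int msr_enable(bool enable)
{
	printf("MSR backend: %d\n", enable);
	return 0;
}

static int shmem_enable(bool enable)
{
	printf("shared memory backend: %d\n", enable);
	return 0;
}

/* ~ DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable): default backend */
static int (*enable_call)(bool) = msr_enable;

int main(void)
{
	bool has_cppc_msr = false;	/* stand-in for boot_cpu_has(X86_FEATURE_CPPC) */

	/* ~ static_call_update(amd_pstate_enable, cppc_enable) at driver init */
	if (!has_cppc_msr)
		enable_call = shmem_enable;

	/* ~ static_call(amd_pstate_enable)(true) in the hot path */
	return enable_call(true);
}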
@@ -332,7 +414,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 	/* It will be updated by governor */
 	policy->cur = policy->cpuinfo.min_freq;
 
-	policy->fast_switch_possible = true;
+	if (boot_cpu_has(X86_FEATURE_CPPC))
+		policy->fast_switch_possible = true;
 
 	/* Initial processor data capability frequencies */
 	cpudata->max_freq = max_freq;
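Our reading of this hunk, not stated explicitly in the patch: fast_switch_possible is left false on shared-memory systems because the cppc_update_perf() path ends in cppc_set_perf(), which may have to go through the ACPI PCC mailbox and can block, whereas the MSR write used by pstate_update_perf() is safe to issue from the scheduler's fast-switch context.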
@@ -364,7 +447,6 @@ static struct cpufreq_driver amd_pstate_driver = {
 	.flags		= CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
 	.verify		= amd_pstate_verify,
 	.target		= amd_pstate_target,
-	.adjust_perf	= amd_pstate_adjust_perf,
 	.init		= amd_pstate_cpu_init,
 	.exit		= amd_pstate_cpu_exit,
 	.name		= "amd-pstate",
@@ -387,8 +469,15 @@ static int __init amd_pstate_init(void)
 		return -EEXIST;
 
 	/* capability check */
-	if (!boot_cpu_has(X86_FEATURE_CPPC)) {
-		pr_debug("AMD CPPC MSR based functionality is not supported\n");
+	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+		pr_debug("AMD CPPC MSR based functionality is supported\n");
+		amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
+	} else if (shared_mem) {
+		static_call_update(amd_pstate_enable, cppc_enable);
+		static_call_update(amd_pstate_init_perf, cppc_init_perf);
+		static_call_update(amd_pstate_update_perf, cppc_update_perf);
+	} else {
+		pr_info("This processor supports shared memory solution, you can enable it with amd_pstate.shared_mem=1\n");
 		return -ENODEV;
 	}