 
 #define pr_fmt(fmt)	"CPPC Cpufreq:" fmt
 
-#include <linux/arch_topology.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/dmi.h>
-#include <linux/irq_work.h>
-#include <linux/kthread.h>
 #include <linux/time.h>
 #include <linux/vmalloc.h>
-#include <uapi/linux/sched/types.h>
 
 #include <asm/unaligned.h>
 
@@ -61,204 +57,6 @@ static struct cppc_workaround_oem_info wa_info[] = {
 	}
 };
 
-#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
-
-/* Frequency invariance support */
-struct cppc_freq_invariance {
-	int cpu;
-	struct irq_work irq_work;
-	struct kthread_work work;
-	struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
-	struct cppc_cpudata *cpu_data;
-};
-
-static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
-static struct kthread_worker *kworker_fie;
-static bool fie_disabled;
-
-static struct cpufreq_driver cppc_cpufreq_driver;
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
-				 struct cppc_perf_fb_ctrs fb_ctrs_t0,
-				 struct cppc_perf_fb_ctrs fb_ctrs_t1);
-
-/**
- * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
- * @work: The work item.
- *
- * The CPPC driver register itself with the topology core to provide its own
- * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
- * gets called by the scheduler on every tick.
- *
- * Note that the arch specific counters have higher priority than CPPC counters,
- * if available, though the CPPC driver doesn't need to have any special
- * handling for that.
- *
- * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
- * reach here from hard-irq context), which then schedules a normal work item
- * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
- * based on the counter updates since the last tick.
- */
-static void cppc_scale_freq_workfn(struct kthread_work *work)
-{
-	struct cppc_freq_invariance *cppc_fi;
-	struct cppc_perf_fb_ctrs fb_ctrs = {0};
-	struct cppc_cpudata *cpu_data;
-	unsigned long local_freq_scale;
-	u64 perf;
-
-	cppc_fi = container_of(work, struct cppc_freq_invariance, work);
-	cpu_data = cppc_fi->cpu_data;
-
-	if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
-		pr_warn("%s: failed to read perf counters\n", __func__);
-		return;
-	}
-
-	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
-	perf = cppc_perf_from_fbctrs(cpu_data, cppc_fi->prev_perf_fb_ctrs,
-				     fb_ctrs);
-
-	perf <<= SCHED_CAPACITY_SHIFT;
-	local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
-	if (WARN_ON(local_freq_scale > 1024))
-		local_freq_scale = 1024;
-
-	per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
-}
-
-static void cppc_irq_work(struct irq_work *irq_work)
-{
-	struct cppc_freq_invariance *cppc_fi;
-
-	cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
-	kthread_queue_work(kworker_fie, &cppc_fi->work);
-}
-
-static void cppc_scale_freq_tick(void)
-{
-	struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
-
-	/*
-	 * cppc_get_perf_ctrs() can potentially sleep, call that from the right
-	 * context.
-	 */
-	irq_work_queue(&cppc_fi->irq_work);
-}
-
-static struct scale_freq_data cppc_sftd = {
-	.source = SCALE_FREQ_SOURCE_CPPC,
-	.set_freq_scale = cppc_scale_freq_tick,
-};
-
-static void cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
-					     struct cppc_cpudata *cpu_data)
-{
-	struct cppc_perf_fb_ctrs fb_ctrs = {0};
-	struct cppc_freq_invariance *cppc_fi;
-	int i, ret;
-
-	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-		return;
-
-	if (fie_disabled)
-		return;
-
-	for_each_cpu(i, policy->cpus) {
-		cppc_fi = &per_cpu(cppc_freq_inv, i);
-		cppc_fi->cpu = i;
-		cppc_fi->cpu_data = cpu_data;
-		kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
-		init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
-
-		ret = cppc_get_perf_ctrs(i, &fb_ctrs);
-		if (ret) {
-			pr_warn("%s: failed to read perf counters: %d\n",
-				__func__, ret);
-			fie_disabled = true;
-		} else {
-			cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
-		}
-	}
-}
-
-static void __init cppc_freq_invariance_init(void)
-{
-	struct sched_attr attr = {
-		.size = sizeof(struct sched_attr),
-		.sched_policy = SCHED_DEADLINE,
-		.sched_nice = 0,
-		.sched_priority = 0,
-		/*
-		 * Fake (unused) bandwidth; workaround to "fix"
-		 * priority inheritance.
-		 */
-		.sched_runtime = 1000000,
-		.sched_deadline = 10000000,
-		.sched_period = 10000000,
-	};
-	int ret;
-
-	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-		return;
-
-	if (fie_disabled)
-		return;
-
-	kworker_fie = kthread_create_worker(0, "cppc_fie");
-	if (IS_ERR(kworker_fie))
-		return;
-
-	ret = sched_setattr_nocheck(kworker_fie->task, &attr);
-	if (ret) {
-		pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
-			ret);
-		kthread_destroy_worker(kworker_fie);
-		return;
-	}
-
-	/* Register for freq-invariance */
-	topology_set_scale_freq_source(&cppc_sftd, cpu_present_mask);
-}
-
-static void cppc_freq_invariance_exit(void)
-{
-	struct cppc_freq_invariance *cppc_fi;
-	int i;
-
-	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-		return;
-
-	if (fie_disabled)
-		return;
-
-	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpu_present_mask);
-
-	for_each_possible_cpu(i) {
-		cppc_fi = &per_cpu(cppc_freq_inv, i);
-		irq_work_sync(&cppc_fi->irq_work);
-	}
-
-	kthread_destroy_worker(kworker_fie);
-	kworker_fie = NULL;
-}
-
-#else
-static inline void
-cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
-				 struct cppc_cpudata *cpu_data)
-{
-}
-
-static inline void cppc_freq_invariance_init(void)
-{
-}
-
-static inline void cppc_freq_invariance_exit(void)
-{
-}
-#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
-
 /* Callback function used to retrieve the max frequency from DMI */
 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
 {
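
The block removed above is the CPPC frequency-invariance engine: the scheduler tick calls cppc_scale_freq_tick(), which queues an irq_work, which in turn queues a kthread work item, and cppc_scale_freq_workfn() finally turns the delivered/reference feedback-counter deltas into a per-CPU arch_freq_scale value in the 0..1024 capacity range. A minimal standalone sketch of that arithmetic follows, in plain user-space C; the helper name is made up for illustration, the zero-delta fallback is simplified to "full capacity", and SCHED_CAPACITY_SHIFT is the kernel's value of 10.

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10	/* kernel scale factors live in 0..1024 */

/*
 * Illustrative helper (not driver code): reproduce the arithmetic the
 * deleted cppc_scale_freq_workfn() used to derive arch_freq_scale.
 * delta_delivered/delta_reference are counter deltas since the last tick,
 * highest_perf comes from the CPU's CPPC capabilities.
 */
static uint64_t cppc_freq_scale_sketch(uint64_t delta_delivered,
				       uint64_t delta_reference,
				       uint64_t reference_perf,
				       uint64_t highest_perf)
{
	uint64_t perf, scale;

	/* counters did not advance: assume full capacity in this sketch */
	if (!delta_reference || !delta_delivered || !highest_perf)
		return 1 << SCHED_CAPACITY_SHIFT;

	/* delivered performance in CPPC "perf" units */
	perf = (reference_perf * delta_delivered) / delta_reference;

	/* map perf/highest_perf onto the 0..1024 capacity range, clamped */
	scale = (perf << SCHED_CAPACITY_SHIFT) / highest_perf;
	return scale > 1024 ? 1024 : scale;
}

int main(void)
{
	/* e.g. the CPU ran at half of highest_perf over the last tick -> 512 */
	printf("scale = %llu\n",
	       (unsigned long long)cppc_freq_scale_sketch(50, 100, 100, 100));
	return 0;
}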
@@ -547,12 +345,9 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
 
 	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
-	if (ret) {
+	if (ret)
 		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
 			 caps->highest_perf, cpu, ret);
-	} else {
-		cppc_freq_invariance_policy_init(policy, cpu_data);
-	}
 
 	return ret;
 }
@@ -565,12 +360,12 @@ static inline u64 get_delta(u64 t1, u64 t0)
 	return (u32)t1 - (u32)t0;
 }
 
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
-				 struct cppc_perf_fb_ctrs fb_ctrs_t0,
-				 struct cppc_perf_fb_ctrs fb_ctrs_t1)
+static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
+				     struct cppc_perf_fb_ctrs fb_ctrs_t0,
+				     struct cppc_perf_fb_ctrs fb_ctrs_t1)
 {
 	u64 delta_reference, delta_delivered;
-	u64 reference_perf;
+	u64 reference_perf, delivered_perf;
 
 	reference_perf = fb_ctrs_t0.reference_perf;
 
@@ -579,21 +374,12 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
 	delta_delivered = get_delta(fb_ctrs_t1.delivered,
 				    fb_ctrs_t0.delivered);
 
-	/* Check to avoid divide-by zero and invalid delivered_perf */
-	if (!delta_reference || !delta_delivered)
-		return cpu_data->perf_ctrls.desired_perf;
-
-	return (reference_perf * delta_delivered) / delta_reference;
-}
-
-static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
-				     struct cppc_perf_fb_ctrs fb_ctrs_t0,
-				     struct cppc_perf_fb_ctrs fb_ctrs_t1)
-{
-	u64 delivered_perf;
-
-	delivered_perf = cppc_perf_from_fbctrs(cpu_data, fb_ctrs_t0,
-					       fb_ctrs_t1);
+	/* Check to avoid divide-by zero */
+	if (delta_reference || delta_delivered)
+		delivered_perf = (reference_perf * delta_delivered) /
+			delta_reference;
+	else
+		delivered_perf = cpu_data->perf_ctrls.desired_perf;
 
 	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
 }
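
The hunk above folds the removed cppc_perf_from_fbctrs() back into cppc_get_rate_from_fbctrs() while keeping the same guarded ratio. A quick worked example in plain user-space C with made-up counter values (not driver code): with reference_perf = 100, delta_delivered = 180 and delta_reference = 90 the ratio gives delivered_perf = 200, which the driver then converts to kHz via cppc_cpufreq_perf_to_khz().

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t reference_perf = 100;	/* perf level of the reference counter */
	uint64_t delta_delivered = 180;	/* delivered-counter ticks since last read */
	uint64_t delta_reference = 90;	/* reference-counter ticks since last read */
	uint64_t desired_perf = 100;	/* fallback if the counters did not move */
	uint64_t delivered_perf;

	/* same guard as the new cppc_get_rate_from_fbctrs() */
	if (delta_reference || delta_delivered)
		delivered_perf = (reference_perf * delta_delivered) / delta_reference;
	else
		delivered_perf = desired_perf;

	printf("delivered_perf = %llu\n", (unsigned long long)delivered_perf); /* 200 */
	return 0;
}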
@@ -718,20 +504,14 @@ static void cppc_check_hisi_workaround(void)
 
 static int __init cppc_cpufreq_init(void)
 {
-	int ret;
-
 	if ((acpi_disabled) || !acpi_cpc_valid())
 		return -ENODEV;
 
 	INIT_LIST_HEAD(&cpu_data_list);
 
 	cppc_check_hisi_workaround();
 
-	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
-	if (!ret)
-		cppc_freq_invariance_init();
-
-	return ret;
+	return cpufreq_register_driver(&cppc_cpufreq_driver);
 }
 
 static inline void free_cpu_data(void)
@@ -748,7 +528,6 @@ static inline void free_cpu_data(void)
 
 static void __exit cppc_cpufreq_exit(void)
 {
-	cppc_freq_invariance_exit();
 	cpufreq_unregister_driver(&cppc_cpufreq_driver);
 
 	free_cpu_data();