
Commit 944293b

Merge tag 'pm-5.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management fix from Rafael Wysocki:

 "Remove recently added frequency invariance support from the CPPC
  cpufreq driver, because it has turned out to be problematic and it
  cannot be fixed properly on time for 5.13 (Viresh Kumar)"

* tag 'pm-5.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  Revert "cpufreq: CPPC: Add support for frequency invariance"
2 parents: e2c8f8e + 771fac5

File tree: 4 files changed (+12, -245 lines)


drivers/cpufreq/Kconfig.arm

Lines changed: 0 additions & 10 deletions
@@ -19,16 +19,6 @@ config ACPI_CPPC_CPUFREQ
 
 	  If in doubt, say N.
 
-config ACPI_CPPC_CPUFREQ_FIE
-	bool "Frequency Invariance support for CPPC cpufreq driver"
-	depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
-	default y
-	help
-	  This extends frequency invariance support in the CPPC cpufreq driver,
-	  by using CPPC delivered and reference performance counters.
-
-	  If in doubt, say N.
-
 config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
 	tristate "Allwinner nvmem based SUN50I CPUFreq driver"
 	depends on ARCH_SUNXI

drivers/cpufreq/cppc_cpufreq.c

Lines changed: 12 additions & 233 deletions
@@ -10,18 +10,14 @@
 
 #define pr_fmt(fmt)	"CPPC Cpufreq:"	fmt
 
-#include <linux/arch_topology.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/dmi.h>
-#include <linux/irq_work.h>
-#include <linux/kthread.h>
 #include <linux/time.h>
 #include <linux/vmalloc.h>
-#include <uapi/linux/sched/types.h>
 
 #include <asm/unaligned.h>
 
@@ -61,204 +57,6 @@ static struct cppc_workaround_oem_info wa_info[] = {
 	}
 };
 
-#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
-
-/* Frequency invariance support */
-struct cppc_freq_invariance {
-	int cpu;
-	struct irq_work irq_work;
-	struct kthread_work work;
-	struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
-	struct cppc_cpudata *cpu_data;
-};
-
-static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
-static struct kthread_worker *kworker_fie;
-static bool fie_disabled;
-
-static struct cpufreq_driver cppc_cpufreq_driver;
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
-				 struct cppc_perf_fb_ctrs fb_ctrs_t0,
-				 struct cppc_perf_fb_ctrs fb_ctrs_t1);
-
-/**
- * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
- * @work: The work item.
- *
- * The CPPC driver register itself with the topology core to provide its own
- * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
- * gets called by the scheduler on every tick.
- *
- * Note that the arch specific counters have higher priority than CPPC counters,
- * if available, though the CPPC driver doesn't need to have any special
- * handling for that.
- *
- * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
- * reach here from hard-irq context), which then schedules a normal work item
- * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
- * based on the counter updates since the last tick.
- */
-static void cppc_scale_freq_workfn(struct kthread_work *work)
-{
-	struct cppc_freq_invariance *cppc_fi;
-	struct cppc_perf_fb_ctrs fb_ctrs = {0};
-	struct cppc_cpudata *cpu_data;
-	unsigned long local_freq_scale;
-	u64 perf;
-
-	cppc_fi = container_of(work, struct cppc_freq_invariance, work);
-	cpu_data = cppc_fi->cpu_data;
-
-	if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
-		pr_warn("%s: failed to read perf counters\n", __func__);
-		return;
-	}
-
-	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
-	perf = cppc_perf_from_fbctrs(cpu_data, cppc_fi->prev_perf_fb_ctrs,
-				     fb_ctrs);
-
-	perf <<= SCHED_CAPACITY_SHIFT;
-	local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
-	if (WARN_ON(local_freq_scale > 1024))
-		local_freq_scale = 1024;
-
-	per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
-}
-
-static void cppc_irq_work(struct irq_work *irq_work)
-{
-	struct cppc_freq_invariance *cppc_fi;
-
-	cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
-	kthread_queue_work(kworker_fie, &cppc_fi->work);
-}
-
-static void cppc_scale_freq_tick(void)
-{
-	struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
-
-	/*
-	 * cppc_get_perf_ctrs() can potentially sleep, call that from the right
-	 * context.
-	 */
-	irq_work_queue(&cppc_fi->irq_work);
-}
-
-static struct scale_freq_data cppc_sftd = {
-	.source = SCALE_FREQ_SOURCE_CPPC,
-	.set_freq_scale = cppc_scale_freq_tick,
-};
-
-static void cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
-					     struct cppc_cpudata *cpu_data)
-{
-	struct cppc_perf_fb_ctrs fb_ctrs = {0};
-	struct cppc_freq_invariance *cppc_fi;
-	int i, ret;
-
-	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-		return;
-
-	if (fie_disabled)
-		return;
-
-	for_each_cpu(i, policy->cpus) {
-		cppc_fi = &per_cpu(cppc_freq_inv, i);
-		cppc_fi->cpu = i;
-		cppc_fi->cpu_data = cpu_data;
-		kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
-		init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
-
-		ret = cppc_get_perf_ctrs(i, &fb_ctrs);
-		if (ret) {
-			pr_warn("%s: failed to read perf counters: %d\n",
-				__func__, ret);
-			fie_disabled = true;
-		} else {
-			cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
-		}
-	}
-}
-
-static void __init cppc_freq_invariance_init(void)
-{
-	struct sched_attr attr = {
-		.size		= sizeof(struct sched_attr),
-		.sched_policy	= SCHED_DEADLINE,
-		.sched_nice	= 0,
-		.sched_priority	= 0,
-		/*
-		 * Fake (unused) bandwidth; workaround to "fix"
-		 * priority inheritance.
-		 */
-		.sched_runtime	= 1000000,
-		.sched_deadline = 10000000,
-		.sched_period	= 10000000,
-	};
-	int ret;
-
-	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-		return;
-
-	if (fie_disabled)
-		return;
-
-	kworker_fie = kthread_create_worker(0, "cppc_fie");
-	if (IS_ERR(kworker_fie))
-		return;
-
-	ret = sched_setattr_nocheck(kworker_fie->task, &attr);
-	if (ret) {
-		pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
-			ret);
-		kthread_destroy_worker(kworker_fie);
-		return;
-	}
-
-	/* Register for freq-invariance */
-	topology_set_scale_freq_source(&cppc_sftd, cpu_present_mask);
-}
-
-static void cppc_freq_invariance_exit(void)
-{
-	struct cppc_freq_invariance *cppc_fi;
-	int i;
-
-	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-		return;
-
-	if (fie_disabled)
-		return;
-
-	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpu_present_mask);
-
-	for_each_possible_cpu(i) {
-		cppc_fi = &per_cpu(cppc_freq_inv, i);
-		irq_work_sync(&cppc_fi->irq_work);
-	}
-
-	kthread_destroy_worker(kworker_fie);
-	kworker_fie = NULL;
-}
-
-#else
-static inline void
-cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
-				 struct cppc_cpudata *cpu_data)
-{
-}
-
-static inline void cppc_freq_invariance_init(void)
-{
-}
-
-static inline void cppc_freq_invariance_exit(void)
-{
-}
-#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
-
 /* Callback function used to retrieve the max frequency from DMI */
 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
 {
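For orientation, the arithmetic at the heart of the removed cppc_scale_freq_workfn() above is small: the per-CPU scale is the delivered performance as a fraction of highest_perf, on a 0..1024 (SCHED_CAPACITY_SHIFT) scale. Below is a minimal standalone sketch with illustrative numbers; freq_scale() is a hypothetical stand-in, not the kernel function.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* a scale of 1024 means full speed */

/*
 * Sketch of the removed updater's math: express delivered performance
 * as a fraction of highest_perf on a 0..1024 scale. Names are
 * illustrative only.
 */
static uint64_t freq_scale(uint64_t perf, uint64_t highest_perf)
{
	uint64_t scale = (perf << SCHED_CAPACITY_SHIFT) / highest_perf;

	if (scale > 1024)	/* the deleted kernel code WARNs here */
		scale = 1024;

	return scale;
}

int main(void)
{
	/* A CPU that delivered 80 out of a highest_perf of 100 reports a
	 * scale of 819, i.e. ~80% of 1024. */
	printf("%" PRIu64 "\n", freq_scale(80, 100));
	return 0;
}

At full speed perf equals highest_perf and the scale saturates at 1024, which is why the deleted code treated anything larger as a WARN-worthy anomaly.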
@@ -547,12 +345,9 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
 
 	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
-	if (ret) {
+	if (ret)
 		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
 			 caps->highest_perf, cpu, ret);
-	} else {
-		cppc_freq_invariance_policy_init(policy, cpu_data);
-	}
 
 	return ret;
 }
@@ -565,12 +360,12 @@ static inline u64 get_delta(u64 t1, u64 t0)
 	return (u32)t1 - (u32)t0;
 }
 
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
-				 struct cppc_perf_fb_ctrs fb_ctrs_t0,
-				 struct cppc_perf_fb_ctrs fb_ctrs_t1)
+static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
+				     struct cppc_perf_fb_ctrs fb_ctrs_t0,
+				     struct cppc_perf_fb_ctrs fb_ctrs_t1)
 {
 	u64 delta_reference, delta_delivered;
-	u64 reference_perf;
+	u64 reference_perf, delivered_perf;
 
 	reference_perf = fb_ctrs_t0.reference_perf;
 
@@ -579,21 +374,12 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
 	delta_delivered = get_delta(fb_ctrs_t1.delivered,
 				    fb_ctrs_t0.delivered);
 
-	/* Check to avoid divide-by zero and invalid delivered_perf */
-	if (!delta_reference || !delta_delivered)
-		return cpu_data->perf_ctrls.desired_perf;
-
-	return (reference_perf * delta_delivered) / delta_reference;
-}
-
-static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
-				     struct cppc_perf_fb_ctrs fb_ctrs_t0,
-				     struct cppc_perf_fb_ctrs fb_ctrs_t1)
-{
-	u64 delivered_perf;
-
-	delivered_perf = cppc_perf_from_fbctrs(cpu_data, fb_ctrs_t0,
-					       fb_ctrs_t1);
+	/* Check to avoid divide-by zero */
+	if (delta_reference || delta_delivered)
+		delivered_perf = (reference_perf * delta_delivered) /
+				 delta_reference;
+	else
+		delivered_perf = cpu_data->perf_ctrls.desired_perf;
 
 	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
 }
@@ -718,20 +504,14 @@ static void cppc_check_hisi_workaround(void)
 
 static int __init cppc_cpufreq_init(void)
 {
-	int ret;
-
 	if ((acpi_disabled) || !acpi_cpc_valid())
 		return -ENODEV;
 
 	INIT_LIST_HEAD(&cpu_data_list);
 
 	cppc_check_hisi_workaround();
 
-	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
-	if (!ret)
-		cppc_freq_invariance_init();
-
-	return ret;
+	return cpufreq_register_driver(&cppc_cpufreq_driver);
 }
 
 static inline void free_cpu_data(void)
@@ -748,7 +528,6 @@ static inline void free_cpu_data(void)
 
 static void __exit cppc_cpufreq_exit(void)
 {
-	cppc_freq_invariance_exit();
 	cpufreq_unregister_driver(&cppc_cpufreq_driver);
 
 	free_cpu_data();
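With the FIE code gone, the only consumer of the feedback counters is the retained cppc_get_rate_from_fbctrs() above, which scales reference_perf by the ratio of the delivered and reference counter deltas. A standalone sketch of that computation follows; delivered_perf() is a hypothetical helper, and its guard mirrors the deleted "divide-by zero and invalid delivered_perf" check rather than the restored one, which divides by delta_reference whenever either delta is non-zero.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch: delivered performance from two CPPC feedback-counter
 * snapshots. delivered = reference_perf * (delta delivered / delta
 * reference), i.e. the reference performance scaled by how fast the
 * CPU actually ran between the snapshots. Names are illustrative.
 */
static uint64_t delivered_perf(uint64_t reference_perf,
			       uint64_t delta_reference,
			       uint64_t delta_delivered,
			       uint64_t desired_perf)
{
	/* Avoid a divide-by-zero or a meaningless ratio by falling back
	 * to the last requested performance level. */
	if (!delta_reference || !delta_delivered)
		return desired_perf;

	return (reference_perf * delta_delivered) / delta_reference;
}

int main(void)
{
	/* Example: the reference counter advanced by 1000 while the
	 * delivered counter advanced by 800, so the CPU ran at 80% of
	 * reference_perf (100), giving a delivered perf of 80. */
	printf("%" PRIu64 "\n", delivered_perf(100, 1000, 800, 100));
	return 0;
}

The kernel then converts this abstract performance value to a rate in kHz via cppc_cpufreq_perf_to_khz(), as the diff above shows.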

include/linux/arch_topology.h

Lines changed: 0 additions & 1 deletion
@@ -37,7 +37,6 @@ bool topology_scale_freq_invariant(void);
 enum scale_freq_source {
 	SCALE_FREQ_SOURCE_CPUFREQ = 0,
 	SCALE_FREQ_SOURCE_ARCH,
-	SCALE_FREQ_SOURCE_CPPC,
 };
 
 struct scale_freq_data {
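The SCALE_FREQ_SOURCE_CPPC entry can go because nothing registers under it any more. For reference, this is roughly how the deleted driver code above plugged into the topology core. The sketch reflects the pre-revert state, so it assumes the just-removed enum value; cppc_fie_register() and cppc_fie_unregister() are hypothetical wrappers.

#include <linux/arch_topology.h>
#include <linux/cpumask.h>

/*
 * Sketch of the pre-revert registration: the driver supplies a callback
 * that the scheduler tick invokes via topology_scale_freq_tick(). The
 * callback runs in hard-irq context, which is why the deleted CPPC code
 * only queued an irq_work here and read the (possibly sleeping)
 * counters from a kthread worker.
 */
static void cppc_scale_freq_tick(void)
{
	/* defer the real work, e.g. irq_work_queue(...) */
}

static struct scale_freq_data cppc_sftd = {
	.source		= SCALE_FREQ_SOURCE_CPPC,	/* removed by this commit */
	.set_freq_scale	= cppc_scale_freq_tick,
};

static void cppc_fie_register(void)
{
	topology_set_scale_freq_source(&cppc_sftd, cpu_present_mask);
}

static void cppc_fie_unregister(void)
{
	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC,
					 cpu_present_mask);
}

Per the deleted kerneldoc, arch-specific counters take priority over a source registered this way, so the topology core, not the driver, arbitrates between providers.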

kernel/sched/core.c

Lines changed: 0 additions & 1 deletion
@@ -6389,7 +6389,6 @@ int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
 {
 	return __sched_setscheduler(p, attr, false, true);
 }
-EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
 
 /**
  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
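sched_setattr_nocheck() was exported by the commit being reverted so that the CPPC driver, which can be modular, could put its "cppc_fie" kworker on SCHED_DEADLINE; with that user gone, the export goes too. A sketch of the deleted usage, assembled from the removed cppc_freq_invariance_init() above; start_fie_worker() is a hypothetical wrapper.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>

/*
 * Sketch: create a dedicated kthread worker and give it SCHED_DEADLINE
 * parameters so the scale-factor updates are not starved under load.
 * The bandwidth values are the "fake (unused)" ones from the deleted
 * code above.
 */
static int start_fie_worker(struct kthread_worker **worker)
{
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 1000000,
		.sched_deadline	= 10000000,
		.sched_period	= 10000000,
	};
	int ret;

	*worker = kthread_create_worker(0, "cppc_fie");
	if (IS_ERR(*worker))
		return PTR_ERR(*worker);

	/* No longer exported after this commit, so only built-in code
	 * could call this from here on. */
	ret = sched_setattr_nocheck((*worker)->task, &attr);
	if (ret)
		kthread_destroy_worker(*worker);

	return ret;
}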
