Skip to content

Commit 6d5afdc

Browse files
lukaszluba-arm authored and rafaeljw committed
cpufreq: schedutil: Move max CPU capacity to sugov_policy
There is no need to keep the max CPU capacity in the per_cpu instance. Furthermore, there is no need to check and update that variable (sg_cpu->max) every time in the frequency change request, which is part of hot path. Instead use struct sugov_policy to store that information. Initialize the max CPU capacity during the setup and start callback. We can do that since all CPUs in the same frequency domain have the same max capacity (capacity setup and thermal pressure are based on that). Acked-by: Viresh Kumar <[email protected]> Signed-off-by: Lukasz Luba <[email protected]> Signed-off-by: Rafael J. Wysocki <[email protected]>
1 parent 1c23f9e commit 6d5afdc

File tree

1 file changed

+15
-15
lines changed

1 file changed

+15
-15
lines changed

kernel/sched/cpufreq_schedutil.c

Lines changed: 15 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -25,6 +25,9 @@ struct sugov_policy {
2525
unsigned int next_freq;
2626
unsigned int cached_raw_freq;
2727

28+
/* max CPU capacity, which is equal for all CPUs in freq. domain */
29+
unsigned long max;
30+
2831
/* The next fields are only needed if fast switch cannot be used: */
2932
struct irq_work irq_work;
3033
struct kthread_work work;
@@ -48,7 +51,6 @@ struct sugov_cpu {
4851

4952
unsigned long util;
5053
unsigned long bw_dl;
51-
unsigned long max;
5254

5355
/* The field below is for single-CPU policies only: */
5456
#ifdef CONFIG_NO_HZ_COMMON
@@ -158,7 +160,6 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
158160
{
159161
struct rq *rq = cpu_rq(sg_cpu->cpu);
160162

161-
sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
162163
sg_cpu->bw_dl = cpu_bw_dl(rq);
163164
sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
164165
FREQUENCY_UTIL, NULL);
@@ -253,6 +254,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
253254
*/
254255
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
255256
{
257+
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
256258
unsigned long boost;
257259

258260
/* No boost currently required */
@@ -280,7 +282,8 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
280282
* sg_cpu->util is already in capacity scale; convert iowait_boost
281283
* into the same scale so we can compare.
282284
*/
283-
boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
285+
boost = sg_cpu->iowait_boost * sg_policy->max;
286+
boost >>= SCHED_CAPACITY_SHIFT;
284287
boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
285288
if (sg_cpu->util < boost)
286289
sg_cpu->util = boost;
@@ -337,7 +340,7 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
337340
if (!sugov_update_single_common(sg_cpu, time, flags))
338341
return;
339342

340-
next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
343+
next_f = get_next_freq(sg_policy, sg_cpu->util, sg_policy->max);
341344
/*
342345
* Do not reduce the frequency if the CPU has not been idle
343346
* recently, as the reduction is likely to be premature then.
@@ -373,6 +376,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
373376
unsigned int flags)
374377
{
375378
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
379+
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
376380
unsigned long prev_util = sg_cpu->util;
377381

378382
/*
@@ -399,7 +403,8 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
399403
sg_cpu->util = prev_util;
400404

401405
cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
402-
map_util_perf(sg_cpu->util), sg_cpu->max);
406+
map_util_perf(sg_cpu->util),
407+
sg_policy->max);
403408

404409
sg_cpu->sg_policy->last_freq_update_time = time;
405410
}
@@ -408,25 +413,19 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
408413
{
409414
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
410415
struct cpufreq_policy *policy = sg_policy->policy;
411-
unsigned long util = 0, max = 1;
416+
unsigned long util = 0;
412417
unsigned int j;
413418

414419
for_each_cpu(j, policy->cpus) {
415420
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
416-
unsigned long j_util, j_max;
417421

418422
sugov_get_util(j_sg_cpu);
419423
sugov_iowait_apply(j_sg_cpu, time);
420-
j_util = j_sg_cpu->util;
421-
j_max = j_sg_cpu->max;
422424

423-
if (j_util * max > j_max * util) {
424-
util = j_util;
425-
max = j_max;
426-
}
425+
util = max(j_sg_cpu->util, util);
427426
}
428427

429-
return get_next_freq(sg_policy, util, max);
428+
return get_next_freq(sg_policy, util, sg_policy->max);
430429
}
431430

432431
static void
@@ -752,14 +751,15 @@ static int sugov_start(struct cpufreq_policy *policy)
752751
{
753752
struct sugov_policy *sg_policy = policy->governor_data;
754753
void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
755-
unsigned int cpu;
754+
unsigned int cpu = cpumask_first(policy->cpus);
756755

757756
sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
758757
sg_policy->last_freq_update_time = 0;
759758
sg_policy->next_freq = 0;
760759
sg_policy->work_in_progress = false;
761760
sg_policy->limits_changed = false;
762761
sg_policy->cached_raw_freq = 0;
762+
sg_policy->max = arch_scale_cpu_capacity(cpu);
763763

764764
sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
765765

0 commit comments

Comments (0)