
Commit 87e867b

vingu-linaro authored and Peter Zijlstra committed
sched/pelt: Cleanup PELT divider
Factorize in a single place the calculation of the divider used to compute *_avg from the *_sum value.

Suggested-by: Dietmar Eggemann <[email protected]>
Signed-off-by: Vincent Guittot <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent c496941 commit 87e867b
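
For orientation, here is a minimal standalone sketch (not kernel code) of the pattern this commit factorizes: each call site used to open-code LOAD_AVG_MAX - 1024 + period_contrib and now calls the new get_pelt_divider() helper instead. The LOAD_AVG_MAX value, the toy struct and the main() driver below are assumptions for illustration only; the weight/capacity scaling done at the real call sites is simplified to a fixed toy weight.

#include <stdint.h>
#include <stdio.h>

/* Assumed here to match the constant from the kernel's generated PELT tables. */
#define LOAD_AVG_MAX	47742

/* Toy stand-in for struct sched_avg, reduced to the fields used here. */
struct sched_avg {
	uint64_t	load_sum;	/* geometrically decayed running sum */
	uint32_t	period_contrib;	/* part of the current 1024us period already accrued */
	unsigned long	load_avg;	/* average derived from load_sum */
};

/* Single place computing the divider, mirroring the helper added in kernel/sched/pelt.h. */
static inline uint32_t get_pelt_divider(const struct sched_avg *avg)
{
	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
}

int main(void)
{
	struct sched_avg sa = { .load_sum = 23000, .period_contrib = 512 };
	const unsigned long weight = 1024;	/* toy nice-0 weight; the real code uses se_weight(se) */

	/* *_avg = weight * *_sum / divider: the pattern used at the converted call sites. */
	sa.load_avg = (weight * sa.load_sum) / get_pelt_divider(&sa);
	printf("divider=%u load_avg=%lu\n", (unsigned)get_pelt_divider(&sa), sa.load_avg);
	return 0;
}

With the helper in pelt.h, the open-coded copies in fair.c and pelt.c collapse into one definition, which is the entire point of the cleanup.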

File tree: 3 files changed (+24, −15 lines)

kernel/sched/fair.c

Lines changed: 18 additions & 14 deletions
@@ -3094,7 +3094,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 
 #ifdef CONFIG_SMP
 	do {
-		u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
+		u32 divider = get_pelt_divider(&se->avg);
 
 		se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
 	} while (0);
@@ -3440,16 +3440,18 @@ static inline void
 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
 	long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
-	/*
-	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
-	 * See ___update_load_avg() for details.
-	 */
-	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
+	u32 divider;
 
 	/* Nothing to update */
 	if (!delta)
 		return;
 
+	/*
+	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+	 * See ___update_load_avg() for details.
+	 */
+	divider = get_pelt_divider(&cfs_rq->avg);
+
 	/* Set new sched_entity's utilization */
 	se->avg.util_avg = gcfs_rq->avg.util_avg;
 	se->avg.util_sum = se->avg.util_avg * divider;
@@ -3463,16 +3465,18 @@ static inline void
 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
 	long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
-	/*
-	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
-	 * See ___update_load_avg() for details.
-	 */
-	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
+	u32 divider;
 
 	/* Nothing to update */
 	if (!delta)
 		return;
 
+	/*
+	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+	 * See ___update_load_avg() for details.
+	 */
+	divider = get_pelt_divider(&cfs_rq->avg);
+
 	/* Set new sched_entity's runnable */
 	se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
 	se->avg.runnable_sum = se->avg.runnable_avg * divider;
@@ -3500,7 +3504,7 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
 	 * See ___update_load_avg() for details.
 	 */
-	divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
+	divider = get_pelt_divider(&cfs_rq->avg);
 
 	if (runnable_sum >= 0) {
 		/*
@@ -3646,7 +3650,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 	if (cfs_rq->removed.nr) {
 		unsigned long r;
-		u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
+		u32 divider = get_pelt_divider(&cfs_rq->avg);
 
 		raw_spin_lock(&cfs_rq->removed.lock);
 		swap(cfs_rq->removed.util_avg, removed_util);
@@ -3701,7 +3705,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
 	 * See ___update_load_avg() for details.
 	 */
-	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
+	u32 divider = get_pelt_divider(&cfs_rq->avg);
 
 	/*
 	 * When we attach the @se to the @cfs_rq, we must align the decay

kernel/sched/pelt.c

Lines changed: 1 addition & 1 deletion
@@ -262,7 +262,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
 static __always_inline void
 ___update_load_avg(struct sched_avg *sa, unsigned long load)
 {
-	u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
+	u32 divider = get_pelt_divider(sa);
 
 	/*
 	 * Step 2: update *_avg.

kernel/sched/pelt.h

Lines changed: 5 additions & 0 deletions
@@ -37,6 +37,11 @@ update_irq_load_avg(struct rq *rq, u64 running)
 }
 #endif
 
+static inline u32 get_pelt_divider(struct sched_avg *avg)
+{
+	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
+}
+
 /*
  * When a task is dequeued, its estimated utilization should not be update if
  * its util_avg has not been updated at least once.
