Skip to content

Commit 64eaf50

Browse files
Chengming Zhou authored and Peter Zijlstra committed
sched/fair: Fix cfs_rq_clock_pelt() for throttled cfs_rq
Since commit 2312729 ("sched/fair: Update scale invariance of PELT") changed to use rq_clock_pelt() instead of rq_clock_task(), we should also use rq_clock_pelt() for throttled_clock_task_time and throttled_clock_task accounting to get a correct cfs_rq_clock_pelt() for a throttled cfs_rq. Also rename throttled_clock_task(_time) to throttled_clock_pelt(_time), since it now records clock_pelt rather than clock_task.

Fixes: 2312729 ("sched/fair: Update scale invariance of PELT")
Signed-off-by: Chengming Zhou <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Ben Segall <[email protected]>
Reviewed-by: Vincent Guittot <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 0635490 commit 64eaf50

File tree

3 files changed

+8
-8
lines changed

3 files changed

+8
-8
lines changed

kernel/sched/fair.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4846,8 +4846,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
48464846

48474847
cfs_rq->throttle_count--;
48484848
if (!cfs_rq->throttle_count) {
4849-
cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
4850-
cfs_rq->throttled_clock_task;
4849+
cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
4850+
cfs_rq->throttled_clock_pelt;
48514851

48524852
/* Add cfs_rq with load or one or more already running entities to the list */
48534853
if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
@@ -4864,7 +4864,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
48644864

48654865
/* group is entering throttled state, stop time */
48664866
if (!cfs_rq->throttle_count) {
4867-
cfs_rq->throttled_clock_task = rq_clock_task(rq);
4867+
cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
48684868
list_del_leaf_cfs_rq(cfs_rq);
48694869
}
48704870
cfs_rq->throttle_count++;
@@ -5308,7 +5308,7 @@ static void sync_throttle(struct task_group *tg, int cpu)
53085308
pcfs_rq = tg->parent->cfs_rq[cpu];
53095309

53105310
cfs_rq->throttle_count = pcfs_rq->throttle_count;
5311-
cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
5311+
cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
53125312
}
53135313

53145314
/* conditionally throttle active cfs_rq's from put_prev_entity() */

kernel/sched/pelt.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -145,9 +145,9 @@ static inline u64 rq_clock_pelt(struct rq *rq)
145145
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
146146
{
147147
if (unlikely(cfs_rq->throttle_count))
148-
return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
148+
return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;
149149

150-
return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
150+
return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
151151
}
152152
#else
153153
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)

kernel/sched/sched.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -603,8 +603,8 @@ struct cfs_rq {
603603
s64 runtime_remaining;
604604

605605
u64 throttled_clock;
606-
u64 throttled_clock_task;
607-
u64 throttled_clock_task_time;
606+
u64 throttled_clock_pelt;
607+
u64 throttled_clock_pelt_time;
608608
int throttled;
609609
int throttle_count;
610610
struct list_head throttled_list;

0 commit comments

Comments (0)