Skip to content

Commit 9e61d12

Browse files
committed
Merge tag 'sched-urgent-2020-05-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Thomas Gleixner: "A set of fixes for the scheduler: - Fix handling of throttled parents in enqueue_task_fair() completely. The recent fix overlooked a corner case where the first iteration terminates due to an entity already being on the runqueue which makes the list management incomplete and later triggers the assertion which checks for completeness. - Fix a similar problem in unthrottle_cfs_rq(). - Show the correct uclamp values in procfs which prints the effective value twice instead of requested and effective" * tag 'sched-urgent-2020-05-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: sched/fair: Fix unthrottle_cfs_rq() for leaf_cfs_rq list sched/debug: Fix requested task uclamp values shown in procfs sched/fair: Fix enqueue_task_fair() warning some more
2 parents caffb99 + 39f23ce commit 9e61d12

File tree

2 files changed

+39
-14
lines changed

2 files changed

+39
-14
lines changed

kernel/sched/debug.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -948,8 +948,8 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
948948
P(se.avg.util_est.enqueued);
949949
#endif
950950
#ifdef CONFIG_UCLAMP_TASK
951-
__PS("uclamp.min", p->uclamp[UCLAMP_MIN].value);
952-
__PS("uclamp.max", p->uclamp[UCLAMP_MAX].value);
951+
__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
952+
__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
953953
__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
954954
__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
955955
#endif

kernel/sched/fair.c

Lines changed: 37 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -4774,7 +4774,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
47744774
struct rq *rq = rq_of(cfs_rq);
47754775
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
47764776
struct sched_entity *se;
4777-
int enqueue = 1;
47784777
long task_delta, idle_task_delta;
47794778

47804779
se = cfs_rq->tg->se[cpu_of(rq)];
@@ -4798,26 +4797,44 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
47984797
idle_task_delta = cfs_rq->idle_h_nr_running;
47994798
for_each_sched_entity(se) {
48004799
if (se->on_rq)
4801-
enqueue = 0;
4800+
break;
4801+
cfs_rq = cfs_rq_of(se);
4802+
enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
4803+
4804+
cfs_rq->h_nr_running += task_delta;
4805+
cfs_rq->idle_h_nr_running += idle_task_delta;
4806+
4807+
/* end evaluation on encountering a throttled cfs_rq */
4808+
if (cfs_rq_throttled(cfs_rq))
4809+
goto unthrottle_throttle;
4810+
}
48024811

4812+
for_each_sched_entity(se) {
48034813
cfs_rq = cfs_rq_of(se);
4804-
if (enqueue) {
4805-
enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
4806-
} else {
4807-
update_load_avg(cfs_rq, se, 0);
4808-
se_update_runnable(se);
4809-
}
4814+
4815+
update_load_avg(cfs_rq, se, UPDATE_TG);
4816+
se_update_runnable(se);
48104817

48114818
cfs_rq->h_nr_running += task_delta;
48124819
cfs_rq->idle_h_nr_running += idle_task_delta;
48134820

4821+
4822+
/* end evaluation on encountering a throttled cfs_rq */
48144823
if (cfs_rq_throttled(cfs_rq))
4815-
break;
4824+
goto unthrottle_throttle;
4825+
4826+
/*
4827+
* One parent has been throttled and cfs_rq removed from the
4828+
* list. Add it back to not break the leaf list.
4829+
*/
4830+
if (throttled_hierarchy(cfs_rq))
4831+
list_add_leaf_cfs_rq(cfs_rq);
48164832
}
48174833

4818-
if (!se)
4819-
add_nr_running(rq, task_delta);
4834+
/* At this point se is NULL and we are at root level*/
4835+
add_nr_running(rq, task_delta);
48204836

4837+
unthrottle_throttle:
48214838
/*
48224839
* The cfs_rq_throttled() breaks in the above iteration can result in
48234840
* incomplete leaf list maintenance, resulting in triggering the
@@ -4826,7 +4843,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
48264843
for_each_sched_entity(se) {
48274844
cfs_rq = cfs_rq_of(se);
48284845

4829-
list_add_leaf_cfs_rq(cfs_rq);
4846+
if (list_add_leaf_cfs_rq(cfs_rq))
4847+
break;
48304848
}
48314849

48324850
assert_list_leaf_cfs_rq(rq);
@@ -5479,6 +5497,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
54795497
/* end evaluation on encountering a throttled cfs_rq */
54805498
if (cfs_rq_throttled(cfs_rq))
54815499
goto enqueue_throttle;
5500+
5501+
/*
5502+
* One parent has been throttled and cfs_rq removed from the
5503+
* list. Add it back to not break the leaf list.
5504+
*/
5505+
if (throttled_hierarchy(cfs_rq))
5506+
list_add_leaf_cfs_rq(cfs_rq);
54825507
}
54835508

54845509
enqueue_throttle:

0 commit comments

Comments (0)