
Commit 5c66d1b

vianpl authored and Peter Zijlstra committed
nohz/full, sched/rt: Fix missed tick-reenabling bug in dequeue_task_rt()
dequeue_task_rt() only decrements 'rt_rq->rt_nr_running' after having
called sched_update_tick_dependency() preventing it from re-enabling the
tick on systems that no longer have pending SCHED_RT tasks but have
multiple runnable SCHED_OTHER tasks:

  dequeue_task_rt()
    dequeue_rt_entity()
      dequeue_rt_stack()
        dequeue_top_rt_rq()
          sub_nr_running()                // decrements rq->nr_running
          sched_update_tick_dependency()
            sched_can_stop_tick()         // checks rq->rt.rt_nr_running,
            ...
        __dequeue_rt_entity()
          dec_rt_tasks()                  // decrements rq->rt.rt_nr_running
          ...

Every other scheduler class performs the operation in the opposite
order, and sched_update_tick_dependency() expects the values to be
updated as such. So avoid the misbehaviour by inverting the order in
which the above operations are performed in the RT scheduler.

Fixes: 76d92ac ("sched: Migrate sched to use new tick dependency mask model")
Signed-off-by: Nicolas Saenz Julienne <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Valentin Schneider <[email protected]>
Reviewed-by: Phil Auld <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
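
For context, the tick dependency is re-evaluated from inside sub_nr_running(). The sketch below is an abridged paraphrase of sched_update_tick_dependency() and sched_can_stop_tick() as they read around this kernel version, trimmed to the branches relevant to this bug (the elided parts handle deadline and round-robin tasks); it is illustrative rather than a verbatim quote, and the comments mark where the stale rq->rt.rt_nr_running leads the logic astray.

static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);	/* re-enables the tick */
}

bool sched_can_stop_tick(struct rq *rq)
{
	...
	/*
	 * With the old ordering, rq->rt.rt_nr_running has not been
	 * decremented yet when the last RT task is dequeued, so this
	 * branch still sees a queued FIFO task and reports that the
	 * tick can stop ...
	 */
	if (rq->rt.rt_nr_running - rq->rt.rr_nr_running)
		return true;

	/*
	 * ... and this check, which would notice the multiple runnable
	 * SCHED_OTHER tasks and keep the tick dependency set, is never
	 * reached.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}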
1 parent 401e496 commit 5c66d1b

File tree: 1 file changed, +9 -6 lines changed


kernel/sched/rt.c

Lines changed: 9 additions & 6 deletions
@@ -480,7 +480,7 @@ static inline void rt_queue_push_tasks(struct rq *rq)
 #endif /* CONFIG_SMP */
 
 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
-static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
+static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
 
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
@@ -601,7 +601,7 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (!rt_se) {
-		dequeue_top_rt_rq(rt_rq);
+		dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
 		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
 		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
 	}
@@ -687,7 +687,7 @@ static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-	dequeue_top_rt_rq(rt_rq);
+	dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
 }
 
 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
@@ -1089,7 +1089,7 @@ static void update_curr_rt(struct rq *rq)
 }
 
 static void
-dequeue_top_rt_rq(struct rt_rq *rt_rq)
+dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
 {
 	struct rq *rq = rq_of_rt_rq(rt_rq);
 
@@ -1100,7 +1100,7 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
 
 	BUG_ON(!rq->nr_running);
 
-	sub_nr_running(rq, rt_rq->rt_nr_running);
+	sub_nr_running(rq, count);
 	rt_rq->rt_queued = 0;
 }
 
@@ -1486,18 +1486,21 @@ static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flag
 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
 {
 	struct sched_rt_entity *back = NULL;
+	unsigned int rt_nr_running;
 
 	for_each_sched_rt_entity(rt_se) {
 		rt_se->back = back;
 		back = rt_se;
 	}
 
-	dequeue_top_rt_rq(rt_rq_of_se(back));
+	rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
 
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
 			__dequeue_rt_entity(rt_se, flags);
 	}
+
+	dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
 }
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
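
For readability, this is how dequeue_rt_stack() reads with the patch applied, reassembled from the last hunk above; the comments are added here for explanation and are not part of the patch.

static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct sched_rt_entity *back = NULL;
	unsigned int rt_nr_running;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	/* Snapshot the count before the per-entity bookkeeping changes it. */
	rt_nr_running = rt_rq_of_se(back)->rt_nr_running;

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se, flags);
	}

	/*
	 * rq->rt.rt_nr_running is now up to date, so the
	 * sched_update_tick_dependency() call reached via
	 * sub_nr_running() sees the post-dequeue value.
	 */
	dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
}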
