
Commit 61d3164

Werkov authored and Peter Zijlstra committed
sched: Skip non-root task_groups with disabled RT_GROUP_SCHED
First, we want to prevent placement of RT tasks on non-root rt_rqs, which we achieve in the task migration code that'd fall back to root_task_group's rt_rq.

Second, we want to work with only root_task_group's rt_rq when iterating all "real" rt_rqs when RT_GROUP is disabled. To achieve this we keep root_task_group as the first entry on the task_groups list and break out quickly.

Signed-off-by: Michal Koutný <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
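As a rough illustration of the second point, here is a simplified user-space sketch, not the kernel code; struct task_group, rt_group_sched_enabled() and the list linkage below are stand-ins for the real definitions. With RT_GROUP_SCHED disabled, next_task_group() returns NULL immediately, so an iteration that starts at root_task_group visits exactly one group.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in for struct task_group; only the list linkage matters here. */
struct task_group {
        const char *name;
        struct task_group *next;        /* models tg->list.next on the task_groups list */
};

/* Stand-in for rt_group_sched_enabled(); return true to walk the whole list. */
static bool rt_group_sched_enabled(void)
{
        return false;
}

/* Mirrors the shape of the patched next_task_group(): bail out before touching the list. */
static struct task_group *next_task_group(struct task_group *tg)
{
        if (!rt_group_sched_enabled())
                return NULL;
        return tg->next;
}

int main(void)
{
        struct task_group child = { "child", NULL };
        struct task_group root = { "root_task_group", &child };

        /*
         * Same shape as the new for_each_rt_rq(): start at the root and ask
         * next_task_group() for more. With group scheduling disabled only
         * the root is printed.
         */
        for (struct task_group *iter = &root; iter; iter = next_task_group(iter))
                printf("visiting %s\n", iter->name);

        return 0;
}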
1 parent e34e013 commit 61d3164

3 files changed: +14 −4 lines

kernel/sched/core.c

Lines changed: 1 addition & 1 deletion
@@ -9020,7 +9020,7 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
         unsigned long flags;

         spin_lock_irqsave(&task_group_lock, flags);
-        list_add_rcu(&tg->list, &task_groups);
+        list_add_tail_rcu(&tg->list, &task_groups);

         /* Root should already exist: */
         WARN_ON(!parent);
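For context on the list_add_tail_rcu() switch above: head insertion places the newest group at the front of task_groups, while tail insertion appends it, so root_task_group, which is registered first, stays at the front and the early break in next_task_group() leaves only it visible. A simplified, non-RCU sketch of the two orderings (plain singly linked list, not the kernel's <linux/list.h>):

#include <stdio.h>

struct node { int id; struct node *next; };

/* Head insertion: newest element ends up first (the ordering list_add() gives). */
static void add_head(struct node **head, struct node *n)
{
        n->next = *head;
        *head = n;
}

/* Tail insertion: newest element ends up last (the ordering list_add_tail() gives). */
static void add_tail(struct node **head, struct node *n)
{
        struct node **p = head;

        n->next = NULL;
        while (*p)
                p = &(*p)->next;
        *p = n;
}

int main(void)
{
        struct node r1 = { 0 }, a1 = { 1 }, b1 = { 2 };
        struct node r2 = { 0 }, a2 = { 1 }, b2 = { 2 };
        struct node *head_order = NULL, *tail_order = NULL;
        struct node *it;

        add_head(&head_order, &r1);     /* head insertion: later nodes jump ahead of node 0 */
        add_head(&head_order, &a1);
        add_head(&head_order, &b1);

        add_tail(&tail_order, &r2);     /* tail insertion: node 0 stays first */
        add_tail(&tail_order, &a2);
        add_tail(&tail_order, &b2);

        for (it = head_order; it; it = it->next)
                printf("%d ", it->id);  /* prints: 2 1 0 */
        printf("\n");

        for (it = tail_order; it; it = it->next)
                printf("%d ", it->id);  /* prints: 0 1 2 */
        printf("\n");
        return 0;
}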

kernel/sched/rt.c

Lines changed: 6 additions & 3 deletions
@@ -495,6 +495,9 @@ typedef struct task_group *rt_rq_iter_t;

 static inline struct task_group *next_task_group(struct task_group *tg)
 {
+        if (!rt_group_sched_enabled())
+                return NULL;
+
         do {
                 tg = list_entry_rcu(tg->list.next,
                         typeof(struct task_group), list);
@@ -507,9 +510,9 @@ static inline struct task_group *next_task_group(struct task_group *tg)
 }

 #define for_each_rt_rq(rt_rq, iter, rq) \
-        for (iter = container_of(&task_groups, typeof(*iter), list); \
-                (iter = next_task_group(iter)) && \
-                (rt_rq = iter->rt_rq[cpu_of(rq)]);)
+        for (iter = &root_task_group; \
+                iter && (rt_rq = iter->rt_rq[cpu_of(rq)]); \
+                iter = next_task_group(iter))

 #define for_each_sched_rt_entity(rt_se) \
         for (; rt_se; rt_se = rt_se->parent)

kernel/sched/sched.h

Lines changed: 7 additions & 0 deletions
@@ -2165,6 +2165,13 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 #endif

 #ifdef CONFIG_RT_GROUP_SCHED
+        /*
+         * p->rt.rt_rq is NULL initially and it is easier to assign
+         * root_task_group's rt_rq than switching in rt_rq_of_se()
+         * Clobbers tg(!)
+         */
+        if (!rt_group_sched_enabled())
+                tg = &root_task_group;
         p->rt.rt_rq = tg->rt_rq[cpu];
         p->rt.parent = tg->rt_se[cpu];
 #endif
