Skip to content

Commit 87f1fb7

Browse files
Werkov authored and Peter Zijlstra committed
sched: Add RT_GROUP WARN checks for non-root task_groups
With CONFIG_RT_GROUP_SCHED but runtime disabling of RT_GROUPs we expect the existence of the root task_group only and all rt_sched_entity'ies should be queued on root's rt_rq. If we get a non-root RT_GROUP something went wrong. Signed-off-by: Michal Koutný <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent d6809c2 commit 87f1fb7

File tree

1 file changed

+12
-2
lines changed

1 file changed

+12
-2
lines changed

kernel/sched/rt.c

Lines changed: 12 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -176,18 +176,22 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
176176

177177
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
178178
{
179+
/* Cannot fold with non-CONFIG_RT_GROUP_SCHED version, layout */
180+
WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
179181
return rt_rq->rq;
180182
}
181183

182184
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
183185
{
186+
WARN_ON(!rt_group_sched_enabled() && rt_se->rt_rq->tg != &root_task_group);
184187
return rt_se->rt_rq;
185188
}
186189

187190
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
188191
{
189192
struct rt_rq *rt_rq = rt_se->rt_rq;
190193

194+
WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
191195
return rt_rq->rq;
192196
}
193197

@@ -504,8 +508,10 @@ typedef struct task_group *rt_rq_iter_t;
504508

505509
static inline struct task_group *next_task_group(struct task_group *tg)
506510
{
507-
if (!rt_group_sched_enabled())
511+
if (!rt_group_sched_enabled()) {
512+
WARN_ON(tg != &root_task_group);
508513
return NULL;
514+
}
509515

510516
do {
511517
tg = list_entry_rcu(tg->list.next,
@@ -2607,8 +2613,9 @@ static int task_is_throttled_rt(struct task_struct *p, int cpu)
26072613
{
26082614
struct rt_rq *rt_rq;
26092615

2610-
#ifdef CONFIG_RT_GROUP_SCHED
2616+
#ifdef CONFIG_RT_GROUP_SCHED // XXX maybe add task_rt_rq(), see also sched_rt_period_rt_rq
26112617
rt_rq = task_group(p)->rt_rq[cpu];
2618+
WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
26122619
#else
26132620
rt_rq = &cpu_rq(cpu)->rt;
26142621
#endif
@@ -2718,6 +2725,9 @@ static int tg_rt_schedulable(struct task_group *tg, void *data)
27182725
tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
27192726
return -EBUSY;
27202727

2728+
if (WARN_ON(!rt_group_sched_enabled() && tg != &root_task_group))
2729+
return -EBUSY;
2730+
27212731
total = to_ratio(period, runtime);
27222732

27232733
/*

0 commit comments

Comments (0)