Commit 45007c6

Authored by jlelli (Juri Lelli) and committed by Peter Zijlstra
sched/deadline: Generalize unique visiting of root domains
Bandwidth checks and updates that work on root domains currently employ a
cookie mechanism for efficiency. This mechanism is very much tied to when
root domains are first created and initialized.

Generalize the cookie mechanism so that it can also be used later, at
runtime, while updating root domains. Additionally, guard it with
sched_domains_mutex, since domains need to be stable while they are being
updated (and this will be required for further dynamic changes).

Fixes: 53916d5 ("sched/deadline: Check bandwidth overflow earlier for hotplug")
Reported-by: Jon Hunter <[email protected]>
Signed-off-by: Juri Lelli <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Valentin Schneider <[email protected]>
Reviewed-by: Dietmar Eggemann <[email protected]>
Tested-by: Waiman Long <[email protected]>
Tested-by: Jon Hunter <[email protected]>
Tested-by: Dietmar Eggemann <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 5620933 · commit 45007c6
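
To make the cookie mechanism described above concrete, here is a toy userspace sketch of the pattern this patch generalizes (illustrative only, not kernel code; the names toy_domain, toy_visited and toy_cookie are invented): several CPUs may share a single root domain, and stamping the domain with the current pass's cookie makes the per-domain work run exactly once per pass.

/*
 * Toy model of the visit-cookie idea (not kernel code). CPUs 0-2 share
 * domain 'a', CPU 3 has its own domain 'b'; the per-domain action runs
 * once per domain per pass.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_domain {
        uint64_t visit_cookie;  /* last pass that touched this domain */
        int id;
};

static uint64_t toy_cookie;     /* bumped once per pass, like dl_cookie */

static bool toy_visited(struct toy_domain *d, uint64_t cookie)
{
        if (d->visit_cookie == cookie)
                return true;    /* already handled in this pass */
        d->visit_cookie = cookie;
        return false;
}

int main(void)
{
        struct toy_domain a = { .id = 0 }, b = { .id = 1 };
        struct toy_domain *cpu_rd[4] = { &a, &a, &a, &b };
        uint64_t cookie = ++toy_cookie;
        int cpu;

        for (cpu = 0; cpu < 4; cpu++) {
                if (toy_visited(cpu_rd[cpu], cookie))
                        continue;
                printf("pass %llu: per-domain work for domain %d (first seen at cpu %d)\n",
                       (unsigned long long)cookie, cpu_rd[cpu]->id, cpu);
        }
        return 0;
}

Because the cookie is a monotonically increasing u64, nothing has to be cleared between passes; wrap-around is the only theoretical hazard, as the comment kept in kernel/sched/sched.h below notes.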

File tree

5 files changed, +20 -12 lines

include/linux/sched/deadline.h

Lines changed: 3 additions & 0 deletions
@@ -37,4 +37,7 @@ extern void dl_clear_root_domain(struct root_domain *rd);
 
 #endif /* CONFIG_SMP */
 
+extern u64 dl_cookie;
+extern bool dl_bw_visited(int cpu, u64 cookie);
+
 #endif /* _LINUX_SCHED_DEADLINE_H */

kernel/sched/deadline.c

Lines changed: 13 additions & 10 deletions
@@ -166,14 +166,14 @@ static inline unsigned long dl_bw_capacity(int i)
         }
 }
 
-static inline bool dl_bw_visited(int cpu, u64 gen)
+static inline bool dl_bw_visited(int cpu, u64 cookie)
 {
         struct root_domain *rd = cpu_rq(cpu)->rd;
 
-        if (rd->visit_gen == gen)
+        if (rd->visit_cookie == cookie)
                 return true;
 
-        rd->visit_gen = gen;
+        rd->visit_cookie = cookie;
         return false;
 }
 
@@ -207,7 +207,7 @@ static inline unsigned long dl_bw_capacity(int i)
         return SCHED_CAPACITY_SCALE;
 }
 
-static inline bool dl_bw_visited(int cpu, u64 gen)
+static inline bool dl_bw_visited(int cpu, u64 cookie)
 {
         return false;
 }
@@ -3171,15 +3171,18 @@ DEFINE_SCHED_CLASS(dl) = {
 #endif
 };
 
-/* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
-static u64 dl_generation;
+/*
+ * Used for dl_bw check and update, used under sched_rt_handler()::mutex and
+ * sched_domains_mutex.
+ */
+u64 dl_cookie;
 
 int sched_dl_global_validate(void)
 {
         u64 runtime = global_rt_runtime();
         u64 period = global_rt_period();
         u64 new_bw = to_ratio(period, runtime);
-        u64 gen = ++dl_generation;
+        u64 cookie = ++dl_cookie;
         struct dl_bw *dl_b;
         int cpu, cpus, ret = 0;
         unsigned long flags;
@@ -3192,7 +3195,7 @@ int sched_dl_global_validate(void)
         for_each_online_cpu(cpu) {
                 rcu_read_lock_sched();
 
-                if (dl_bw_visited(cpu, gen))
+                if (dl_bw_visited(cpu, cookie))
                         goto next;
 
                 dl_b = dl_bw_of(cpu);
@@ -3229,7 +3232,7 @@ static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
 void sched_dl_do_global(void)
 {
         u64 new_bw = -1;
-        u64 gen = ++dl_generation;
+        u64 cookie = ++dl_cookie;
         struct dl_bw *dl_b;
         int cpu;
         unsigned long flags;
@@ -3240,7 +3243,7 @@ void sched_dl_do_global(void)
         for_each_possible_cpu(cpu) {
                 rcu_read_lock_sched();
 
-                if (dl_bw_visited(cpu, gen)) {
+                if (dl_bw_visited(cpu, cookie)) {
                         rcu_read_unlock_sched();
                         continue;
                 }
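
The commit message says the point of this generalization is that the cookie can also be used "later at runtime while updating root domains". As a hedged sketch of what such a caller might look like (the function below is hypothetical and not part of this patch; it only reuses symbols visible in the hunks above), it would mirror sched_dl_global_validate(): hold sched_domains_mutex so the root domains stay stable, bump dl_cookie once for the pass, and skip CPUs whose root domain has already been visited.

/*
 * Hypothetical runtime updater, sketched after the callers above; not part
 * of this commit. Assumes the declarations added to
 * include/linux/sched/deadline.h.
 */
static void example_update_root_domains(void)
{
        u64 cookie;
        int cpu;

        sched_domains_mutex_lock();     /* keep root domains stable */
        cookie = ++dl_cookie;           /* one new cookie per pass */

        for_each_online_cpu(cpu) {
                rcu_read_lock_sched();

                if (dl_bw_visited(cpu, cookie)) {
                        /* this CPU's root domain was already handled */
                        rcu_read_unlock_sched();
                        continue;
                }

                /* ... per-root-domain bandwidth check or update here ... */

                rcu_read_unlock_sched();
        }

        sched_domains_mutex_unlock();
}

This is the same shape sched_dl_do_global() already follows for the possible-CPU case.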

kernel/sched/rt.c

Lines changed: 2 additions & 0 deletions
@@ -2910,6 +2910,7 @@ static int sched_rt_handler(const struct ctl_table *table, int write, void *buff
         int ret;
 
         mutex_lock(&mutex);
+        sched_domains_mutex_lock();
         old_period = sysctl_sched_rt_period;
         old_runtime = sysctl_sched_rt_runtime;
 
@@ -2936,6 +2937,7 @@ static int sched_rt_handler(const struct ctl_table *table, int write, void *buff
                 sysctl_sched_rt_period = old_period;
                 sysctl_sched_rt_runtime = old_runtime;
         }
+        sched_domains_mutex_unlock();
         mutex_unlock(&mutex);
 
         return ret;

kernel/sched/sched.h

Lines changed: 1 addition & 1 deletion
@@ -998,7 +998,7 @@ struct root_domain {
          * Also, some corner cases, like 'wrap around' is dangerous, but given
          * that u64 is 'big enough'. So that shouldn't be a concern.
          */
-        u64 visit_gen;
+        u64 visit_cookie;
 
 #ifdef HAVE_RT_PUSH_IPI
         /*

kernel/sched/topology.c

Lines changed: 1 addition & 1 deletion
@@ -568,7 +568,7 @@ static int init_rootdomain(struct root_domain *rd)
         rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);
 #endif
 
-        rd->visit_gen = 0;
+        rd->visit_cookie = 0;
         init_dl_bw(&rd->dl_bw);
         if (cpudl_init(&rd->cpudl) != 0)
                 goto free_rto_mask;
