
Commit 15125a2

Author: Ingo Molnar <[email protected]>
sched/smp: Use the SMP version of the RT scheduling class
Simplify the scheduler by making CONFIG_SMP=y primitives and data
structures unconditional in the RT policies scheduler.

Signed-off-by: Ingo Molnar <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
Cc: Dietmar Eggemann <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Sebastian Andrzej Siewior <[email protected]>
Cc: Shrikanth Hegde <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Valentin Schneider <[email protected]>
Cc: Vincent Guittot <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Parent: 74063c1
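The change is mechanical throughout: code and data that were guarded by #ifdef CONFIG_SMP become unconditional, and the empty !CONFIG_SMP stub variants are deleted. As a rough illustration of the resulting shape, here is a minimal, compilable user-space sketch; struct rt_rq_sketch and init_rt_rq_sketch() are hypothetical stand-ins modeled on the kernel's struct rt_rq and init_rt_rq(), not the real definitions.

#include <stdio.h>

#define MAX_RT_PRIO 100

/* Hypothetical stand-in for struct rt_rq: the SMP-only fields
 * (highest_prio.next, overloaded) are now plain, unconditional
 * members instead of being wrapped in #ifdef CONFIG_SMP. */
struct rt_rq_sketch {
	struct {
		int curr;	/* highest queued rt task prio */
		int next;	/* next highest */
	} highest_prio;
	int overloaded;
};

/* Before the cleanup, the body of this function was split by
 * #if defined CONFIG_SMP ... #endif; now there is one version. */
static void init_rt_rq_sketch(struct rt_rq_sketch *rt_rq)
{
	rt_rq->highest_prio.curr = MAX_RT_PRIO - 1;
	rt_rq->highest_prio.next = MAX_RT_PRIO - 1;
	rt_rq->overloaded = 0;
}

int main(void)
{
	struct rt_rq_sketch rq;

	init_rt_rq_sketch(&rq);
	printf("curr=%d next=%d overloaded=%d\n",
	       rq.highest_prio.curr, rq.highest_prio.next, rq.overloaded);
	return 0;
}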

File tree:

  kernel/sched/rt.c
  kernel/sched/sched.h

2 files changed, 0 insertions(+), 74 deletions(-)


kernel/sched/rt.c

Lines changed: 0 additions & 72 deletions
@@ -78,12 +78,10 @@ void init_rt_rq(struct rt_rq *rt_rq)
 	/* delimiter for bitsearch: */
 	__set_bit(MAX_RT_PRIO, array->bitmap);
 
-#if defined CONFIG_SMP
 	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
 	rt_rq->highest_prio.next = MAX_RT_PRIO-1;
 	rt_rq->overloaded = 0;
 	plist_head_init(&rt_rq->pushable_tasks);
-#endif /* CONFIG_SMP */
 	/* We start is dequeued state, because no RT tasks are queued */
 	rt_rq->rt_queued = 0;
 
@@ -332,8 +330,6 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 }
 #endif /* !CONFIG_RT_GROUP_SCHED */
 
-#ifdef CONFIG_SMP
-
 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
@@ -433,21 +429,6 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 	}
 }
 
-#else /* !CONFIG_SMP: */
-
-static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
-{
-}
-
-static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
-{
-}
-
-static inline void rt_queue_push_tasks(struct rq *rq)
-{
-}
-#endif /* !CONFIG_SMP */
-
 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
 
@@ -597,17 +578,10 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
 	return p->prio != p->normal_prio;
 }
 
-#ifdef CONFIG_SMP
 static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return this_rq()->rd->span;
 }
-#else
-static inline const struct cpumask *sched_rt_period_mask(void)
-{
-	return cpu_online_mask;
-}
-#endif
 
 static inline
 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
@@ -628,7 +602,6 @@ bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
 		rt_rq->rt_time < rt_b->rt_runtime);
 }
 
-#ifdef CONFIG_SMP
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.
  */
@@ -801,9 +774,6 @@ static void balance_runtime(struct rt_rq *rt_rq)
 		raw_spin_lock(&rt_rq->rt_runtime_lock);
 	}
 }
-#else /* !CONFIG_SMP: */
-static inline void balance_runtime(struct rt_rq *rt_rq) {}
-#endif /* !CONFIG_SMP */
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
@@ -980,10 +950,8 @@ struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 	return &cpu_rq(cpu)->rt;
 }
 
-#ifdef CONFIG_SMP
 static void __enable_runtime(struct rq *rq) { }
 static void __disable_runtime(struct rq *rq) { }
-#endif
 
 #endif /* !CONFIG_RT_GROUP_SCHED */
 
@@ -1078,8 +1046,6 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 	cpufreq_update_util(rq, 0);
 }
 
-#if defined CONFIG_SMP
-
 static void
 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
@@ -1110,16 +1076,6 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 	cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
 
-#else /* !CONFIG_SMP: */
-
-static inline
-void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
-static inline
-void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
-
-#endif /* !CONFIG_SMP */
-
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 static void
 inc_rt_prio(struct rt_rq *rt_rq, int prio)
 {
@@ -1158,13 +1114,6 @@ dec_rt_prio(struct rt_rq *rt_rq, int prio)
 	dec_rt_prio_smp(rt_rq, prio, prev_prio);
 }
 
-#else /* !(CONFIG_SMP || CONFIG_RT_GROUP_SCHED): */
-
-static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
-static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
-
-#endif /* !(CONFIG_SMP || CONFIG_RT_GROUP_SCHED) */
-
 #ifdef CONFIG_RT_GROUP_SCHED
 
 static void
@@ -1541,7 +1490,6 @@ static void yield_task_rt(struct rq *rq)
 		requeue_task_rt(rq, rq->curr, 0);
 }
 
-#ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
 static int
@@ -1656,7 +1604,6 @@ static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 
 	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
 }
-#endif /* CONFIG_SMP */
 
 /*
  * Preempt the current task with a newly woken task if needed:
@@ -1670,7 +1617,6 @@ static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
 		return;
 	}
 
-#ifdef CONFIG_SMP
 	/*
 	 * If:
 	 *
@@ -1685,7 +1631,6 @@ static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
 	 */
 	if (p->prio == donor->prio && !test_tsk_need_resched(rq->curr))
 		check_preempt_equal_prio(rq, p);
-#endif /* CONFIG_SMP */
 }
 
 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
@@ -1779,8 +1724,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_s
 		enqueue_pushable_task(rq, p);
 }
 
-#ifdef CONFIG_SMP
-
 /* Only try algorithms three times */
 #define RT_MAX_TRIES 3
 
@@ -2454,11 +2397,6 @@ void __init init_sched_rt_class(void)
 					GFP_KERNEL, cpu_to_node(i));
 	}
 }
-#else /* !CONFIG_SMP: */
-void __init init_sched_rt_class(void)
-{
-}
-#endif /* !CONFIG_SMP */
 
 /*
  * When switching a task to RT, we may overload the runqueue
@@ -2482,10 +2420,8 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 * then see if we can move to another run queue.
 	 */
 	if (task_on_rq_queued(p)) {
-#ifdef CONFIG_SMP
 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
 			rt_queue_push_tasks(rq);
-#endif /* CONFIG_SMP */
 		if (p->prio < rq->donor->prio && cpu_online(cpu_of(rq)))
 			resched_curr(rq);
 	}
@@ -2502,7 +2438,6 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 		return;
 
 	if (task_current_donor(rq, p)) {
-#ifdef CONFIG_SMP
 		/*
 		 * If our priority decreases while running, we
 		 * may need to pull tasks to this runqueue.
@@ -2516,11 +2451,6 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 		 */
 		if (p->prio > rq->rt.highest_prio.curr)
 			resched_curr(rq);
-#else /* !CONFIG_SMP: */
-		/* For UP simply resched on drop of prio */
-		if (oldprio < p->prio)
-			resched_curr(rq);
-#endif /* !CONFIG_SMP */
 	} else {
 		/*
 		 * This task is not running, but if it is
@@ -2641,7 +2571,6 @@ DEFINE_SCHED_CLASS(rt) = {
 	.put_prev_task		= put_prev_task_rt,
 	.set_next_task		= set_next_task_rt,
 
-#ifdef CONFIG_SMP
 	.balance		= balance_rt,
 	.select_task_rq		= select_task_rq_rt,
 	.set_cpus_allowed	= set_cpus_allowed_common,
@@ -2650,7 +2579,6 @@ DEFINE_SCHED_CLASS(rt) = {
 	.task_woken		= task_woken_rt,
 	.switched_from		= switched_from_rt,
 	.find_lock_rq		= find_lock_lowest_rq,
-#endif /* !CONFIG_SMP */
 
 	.task_tick		= task_tick_rt,
 
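The last two hunks above apply the same unification to the RT sched-class table: callbacks such as .balance and .select_task_rq, previously registered only under #ifdef CONFIG_SMP, are now unconditional members of DEFINE_SCHED_CLASS(rt). A minimal, compilable sketch of that function-pointer-table pattern, with hypothetical stand-in types and callbacks rather than the kernel's:

#include <stdio.h>

/* Hypothetical stand-in for a sched-class ops table. */
struct sched_class_sketch {
	void (*put_prev_task)(void);
	void (*set_next_task)(void);
	/* Previously these members were registered under #ifdef CONFIG_SMP: */
	void (*balance)(void);
	void (*select_task_rq)(void);
};

static void put_prev(void)  { puts("put_prev_task"); }
static void set_next(void)  { puts("set_next_task"); }
static void balance(void)   { puts("balance"); }
static void select_rq(void) { puts("select_task_rq"); }

static const struct sched_class_sketch rt_sketch = {
	.put_prev_task	= put_prev,
	.set_next_task	= set_next,
	.balance	= balance,	/* no #ifdef guard needed any more */
	.select_task_rq	= select_rq,
};

int main(void)
{
	rt_sketch.balance();
	rt_sketch.select_task_rq();
	return 0;
}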

kernel/sched/sched.h

Lines changed: 0 additions & 2 deletions
@@ -792,11 +792,9 @@ struct rt_rq {
 		int		curr; /* highest queued rt task prio */
 		int		next; /* next highest */
 	} highest_prio;
-#ifdef CONFIG_SMP
 	bool			overloaded;
 	struct plist_head	pushable_tasks;
 
-#endif /* CONFIG_SMP */
 	int			rt_queued;
 
 #ifdef CONFIG_RT_GROUP_SCHED
