@@ -78,12 +78,10 @@ void init_rt_rq(struct rt_rq *rt_rq)
 	/* delimiter for bitsearch: */
 	__set_bit(MAX_RT_PRIO, array->bitmap);
 
-#if defined CONFIG_SMP
 	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
 	rt_rq->highest_prio.next = MAX_RT_PRIO-1;
 	rt_rq->overloaded = 0;
 	plist_head_init(&rt_rq->pushable_tasks);
-#endif /* CONFIG_SMP */
 	/* We start is dequeued state, because no RT tasks are queued */
 	rt_rq->rt_queued = 0;
 
@@ -332,8 +330,6 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 }
 #endif /* !CONFIG_RT_GROUP_SCHED */
 
-#ifdef CONFIG_SMP
-
 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
@@ -433,21 +429,6 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 	}
 }
 
-#else /* !CONFIG_SMP: */
-
-static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
-{
-}
-
-static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
-{
-}
-
-static inline void rt_queue_push_tasks(struct rq *rq)
-{
-}
-#endif /* !CONFIG_SMP */
-
 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
 
@@ -597,17 +578,10 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
 	return p->prio != p->normal_prio;
 }
 
-#ifdef CONFIG_SMP
 static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return this_rq()->rd->span;
 }
-#else
-static inline const struct cpumask *sched_rt_period_mask(void)
-{
-	return cpu_online_mask;
-}
-#endif
 
 static inline
 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
@@ -628,7 +602,6 @@ bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
 		rt_rq->rt_time < rt_b->rt_runtime);
 }
 
-#ifdef CONFIG_SMP
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.
  */
@@ -801,9 +774,6 @@ static void balance_runtime(struct rt_rq *rt_rq)
 		raw_spin_lock(&rt_rq->rt_runtime_lock);
 	}
 }
-#else /* !CONFIG_SMP: */
-static inline void balance_runtime(struct rt_rq *rt_rq) {}
-#endif /* !CONFIG_SMP */
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
@@ -980,10 +950,8 @@ struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 	return &cpu_rq(cpu)->rt;
 }
 
-#ifdef CONFIG_SMP
 static void __enable_runtime(struct rq *rq) { }
 static void __disable_runtime(struct rq *rq) { }
-#endif
 
 #endif /* !CONFIG_RT_GROUP_SCHED */
 
@@ -1078,8 +1046,6 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 	cpufreq_update_util(rq, 0);
 }
 
-#if defined CONFIG_SMP
-
 static void
 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
@@ -1110,16 +1076,6 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
 
-#else /* !CONFIG_SMP: */
-
-static inline
-void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
-static inline
-void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
-
-#endif /* !CONFIG_SMP */
-
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 static void
 inc_rt_prio(struct rt_rq *rt_rq, int prio)
 {
@@ -1158,13 +1114,6 @@ dec_rt_prio(struct rt_rq *rt_rq, int prio)
 	dec_rt_prio_smp(rt_rq, prio, prev_prio);
 }
 
-#else /* !(CONFIG_SMP || CONFIG_RT_GROUP_SCHED): */
-
-static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
-static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
-
-#endif /* !(CONFIG_SMP || CONFIG_RT_GROUP_SCHED) */
-
 #ifdef CONFIG_RT_GROUP_SCHED
 
 static void
@@ -1541,7 +1490,6 @@ static void yield_task_rt(struct rq *rq)
 	requeue_task_rt(rq, rq->curr, 0);
 }
 
-#ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
 static int
@@ -1656,7 +1604,6 @@ static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 
 	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
 }
-#endif /* CONFIG_SMP */
 
 /*
  * Preempt the current task with a newly woken task if needed:
@@ -1670,7 +1617,6 @@ static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
 		return;
 	}
 
-#ifdef CONFIG_SMP
 	/*
 	 * If:
 	 *
@@ -1685,7 +1631,6 @@ static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
 	 */
 	if (p->prio == donor->prio && !test_tsk_need_resched(rq->curr))
 		check_preempt_equal_prio(rq, p);
-#endif /* CONFIG_SMP */
 }
 
 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
@@ -1779,8 +1724,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_s
 		enqueue_pushable_task(rq, p);
 }
 
-#ifdef CONFIG_SMP
-
 /* Only try algorithms three times */
 #define RT_MAX_TRIES 3
 
@@ -2454,11 +2397,6 @@ void __init init_sched_rt_class(void)
 					GFP_KERNEL, cpu_to_node(i));
 	}
 }
-#else /* !CONFIG_SMP: */
-void __init init_sched_rt_class(void)
-{
-}
-#endif /* !CONFIG_SMP */
 
 /*
  * When switching a task to RT, we may overload the runqueue
@@ -2482,10 +2420,8 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 * then see if we can move to another run queue.
 	 */
 	if (task_on_rq_queued(p)) {
-#ifdef CONFIG_SMP
 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
 			rt_queue_push_tasks(rq);
-#endif /* CONFIG_SMP */
 		if (p->prio < rq->donor->prio && cpu_online(cpu_of(rq)))
 			resched_curr(rq);
 	}
@@ -2502,7 +2438,6 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 		return;
 
 	if (task_current_donor(rq, p)) {
-#ifdef CONFIG_SMP
 		/*
 		 * If our priority decreases while running, we
 		 * may need to pull tasks to this runqueue.
@@ -2516,11 +2451,6 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 		 */
 		if (p->prio > rq->rt.highest_prio.curr)
 			resched_curr(rq);
-#else /* !CONFIG_SMP: */
-		/* For UP simply resched on drop of prio */
-		if (oldprio < p->prio)
-			resched_curr(rq);
-#endif /* !CONFIG_SMP */
 	} else {
 		/*
 		 * This task is not running, but if it is
@@ -2641,7 +2571,6 @@ DEFINE_SCHED_CLASS(rt) = {
 	.put_prev_task		= put_prev_task_rt,
 	.set_next_task		= set_next_task_rt,
 
-#ifdef CONFIG_SMP
 	.balance		= balance_rt,
 	.select_task_rq		= select_task_rq_rt,
 	.set_cpus_allowed	= set_cpus_allowed_common,
@@ -2650,7 +2579,6 @@ DEFINE_SCHED_CLASS(rt) = {
 	.task_woken		= task_woken_rt,
 	.switched_from		= switched_from_rt,
 	.find_lock_rq		= find_lock_lowest_rq,
-#endif /* !CONFIG_SMP */
 
 	.task_tick		= task_tick_rt,