@@ -169,8 +169,6 @@ static const struct file_operations sched_feat_fops = {
 	.release	= single_release,
 };
 
-#ifdef CONFIG_SMP
-
 static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
 				   size_t cnt, loff_t *ppos)
 {
@@ -217,8 +215,6 @@ static const struct file_operations sched_scaling_fops = {
 	.release	= single_release,
 };
 
-#endif /* CONFIG_SMP */
-
 #ifdef CONFIG_PREEMPT_DYNAMIC
 
 static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
@@ -511,15 +507,13 @@ static __init int sched_init_debug(void)
 	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
 	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
 
-#ifdef CONFIG_SMP
 	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
 	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
 	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
 
 	sched_domains_mutex_lock();
 	update_sched_domain_debugfs();
 	sched_domains_mutex_unlock();
-#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_NUMA_BALANCING
 	numa = debugfs_create_dir("numa_balancing", debugfs_sched);
@@ -685,11 +679,9 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 	}
 
 	P(se->load.weight);
-#ifdef CONFIG_SMP
 	P(se->avg.load_avg);
 	P(se->avg.util_avg);
 	P(se->avg.runnable_avg);
-#endif /* CONFIG_SMP */
 
 #undef PN_SCHEDSTAT
 #undef PN
@@ -849,7 +841,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
-#ifdef CONFIG_SMP
 	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
 			cfs_rq->avg.load_avg);
 	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
@@ -870,7 +861,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
 			atomic_long_read(&cfs_rq->tg->load_avg));
 #endif /* CONFIG_FAIR_GROUP_SCHED */
-#endif /* CONFIG_SMP */
 #ifdef CONFIG_CFS_BANDWIDTH
 	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
 			cfs_rq->throttled);
@@ -967,12 +957,10 @@ do { \
 #undef P
 #undef PN
 
-#ifdef CONFIG_SMP
 #define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
 	P64(avg_idle);
 	P64(max_idle_balance_cost);
 #undef P64
-#endif /* CONFIG_SMP */
 
 #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
 	if (schedstat_enabled()) {
@@ -1242,7 +1230,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 	__PS("nr_involuntary_switches", p->nivcsw);
 
 	P(se.load.weight);
-#ifdef CONFIG_SMP
 	P(se.avg.load_sum);
 	P(se.avg.runnable_sum);
 	P(se.avg.util_sum);
@@ -1251,7 +1238,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 	P(se.avg.util_avg);
 	P(se.avg.last_update_time);
 	PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
-#endif /* CONFIG_SMP */
 #ifdef CONFIG_UCLAMP_TASK
 	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
 	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);