@@ -17,7 +17,6 @@ static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
 /* Enable/disable per-node idle cpumasks */
 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);
 
-#ifdef CONFIG_SMP
 /* Enable/disable LLC aware optimizations */
 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
 
@@ -794,17 +793,6 @@ static void reset_idle_masks(struct sched_ext_ops *ops)
 		cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
 	}
 }
-#else	/* !CONFIG_SMP */
-static bool scx_idle_test_and_clear_cpu(int cpu)
-{
-	return -EBUSY;
-}
-
-static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
-{
-	return -EBUSY;
-}
-#endif	/* CONFIG_SMP */
 
 void scx_idle_enable(struct sched_ext_ops *ops)
 {
@@ -818,9 +806,7 @@ void scx_idle_enable(struct sched_ext_ops *ops)
 	else
 		static_branch_disable_cpuslocked(&scx_builtin_idle_per_node);
 
-#ifdef CONFIG_SMP
 	reset_idle_masks(ops);
-#endif
 }
 
 void scx_idle_disable(void)
@@ -906,7 +892,6 @@ static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_f
 	if (!rq)
 		lockdep_assert_held(&p->pi_lock);
 
-#ifdef CONFIG_SMP
 	/*
 	 * This may also be called from ops.enqueue(), so we need to handle
 	 * per-CPU tasks as well. For these tasks, we can skip all idle CPU
@@ -923,9 +908,7 @@ static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_f
 		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
 					 allowed ?: p->cpus_ptr, flags);
 	}
-#else
-	cpu = -EBUSY;
-#endif
+
 	if (scx_kf_allowed_if_unlocked())
 		task_rq_unlock(rq, p, &rf);
 
@@ -1016,11 +999,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
 	if (node < 0)
 		return cpu_none_mask;
 
-#ifdef CONFIG_SMP
 	return idle_cpumask(node)->cpu;
-#else
-	return cpu_none_mask;
-#endif
 }
 
 /**
@@ -1040,11 +1019,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
 	if (!check_builtin_idle_enabled())
 		return cpu_none_mask;
 
-#ifdef CONFIG_SMP
 	return idle_cpumask(NUMA_NO_NODE)->cpu;
-#else
-	return cpu_none_mask;
-#endif
 }
 
 /**
@@ -1063,14 +1038,10 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
 	if (node < 0)
 		return cpu_none_mask;
 
-#ifdef CONFIG_SMP
 	if (sched_smt_active())
 		return idle_cpumask(node)->smt;
 	else
 		return idle_cpumask(node)->cpu;
-#else
-	return cpu_none_mask;
-#endif
 }
 
 /**
@@ -1091,14 +1062,10 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
 	if (!check_builtin_idle_enabled())
 		return cpu_none_mask;
 
-#ifdef CONFIG_SMP
 	if (sched_smt_active())
 		return idle_cpumask(NUMA_NO_NODE)->smt;
 	else
 		return idle_cpumask(NUMA_NO_NODE)->cpu;
-#else
-	return cpu_none_mask;
-#endif
 }
 
 /**
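For context, here is a minimal BPF-side sketch of how a sched_ext scheduler might consume the idle cpumasks exposed by the kfuncs touched above. This is not part of the patch: the ops callback name (example_select_cpu) and the selection logic are illustrative assumptions; scx_bpf_get_idle_cpumask(), scx_bpf_put_idle_cpumask(), scx_bpf_pick_idle_cpu(), and bpf_cpumask_test_cpu() are existing kfuncs.

/*
 * Hypothetical ops.select_cpu() implementation (illustrative only);
 * assumes the scx BPF helpers from tools/sched_ext/include/scx/common.bpf.h.
 */
#include <scx/common.bpf.h>

s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	const struct cpumask *idle;
	s32 cpu = prev_cpu;

	/* Acquire a reference to the global idle cpumask. */
	idle = scx_bpf_get_idle_cpumask();

	/*
	 * Keep the task on its previous CPU if that CPU is still idle,
	 * otherwise ask the kernel to pick any idle CPU the task is
	 * allowed to run on.
	 */
	if (!bpf_cpumask_test_cpu(prev_cpu, idle))
		cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);

	scx_bpf_put_idle_cpumask(idle);

	/* scx_bpf_pick_idle_cpu() returns a negative value on failure. */
	return cpu >= 0 ? cpu : prev_cpu;
}

With the #ifdef CONFIG_SMP guards removed by this patch, these kfuncs return the real per-node idle cpumasks unconditionally rather than falling back to cpu_none_mask/-EBUSY stubs on !CONFIG_SMP builds.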