Skip to content

Commit 8834ace

Browse files
EricccTaiwan authored and htejun committed
sched_ext: Always use SMP versions in kernel/sched/ext_idle.c
Simplify the scheduler by making formerly SMP-only primitives and data structures unconditional.

tj: Updated subject for clarity. Fixed stray #else block which wasn't removed, causing a build failure.

Signed-off-by: Cheng-Yang Chou <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>
1 parent 6a1cda1 commit 8834ace

File tree

1 file changed

+1
-34
lines changed

1 file changed

+1
-34
lines changed

kernel/sched/ext_idle.c

Lines changed: 1 addition & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,6 @@ static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
1717
/* Enable/disable per-node idle cpumasks */
1818
static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);
1919

20-
#ifdef CONFIG_SMP
2120
/* Enable/disable LLC aware optimizations */
2221
static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
2322

@@ -794,17 +793,6 @@ static void reset_idle_masks(struct sched_ext_ops *ops)
794793
cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
795794
}
796795
}
797-
#else /* !CONFIG_SMP */
798-
static bool scx_idle_test_and_clear_cpu(int cpu)
799-
{
800-
return -EBUSY;
801-
}
802-
803-
static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
804-
{
805-
return -EBUSY;
806-
}
807-
#endif /* CONFIG_SMP */
808796

809797
void scx_idle_enable(struct sched_ext_ops *ops)
810798
{
@@ -818,9 +806,7 @@ void scx_idle_enable(struct sched_ext_ops *ops)
818806
else
819807
static_branch_disable_cpuslocked(&scx_builtin_idle_per_node);
820808

821-
#ifdef CONFIG_SMP
822809
reset_idle_masks(ops);
823-
#endif
824810
}
825811

826812
void scx_idle_disable(void)
@@ -906,7 +892,6 @@ static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_f
906892
if (!rq)
907893
lockdep_assert_held(&p->pi_lock);
908894

909-
#ifdef CONFIG_SMP
910895
/*
911896
* This may also be called from ops.enqueue(), so we need to handle
912897
* per-CPU tasks as well. For these tasks, we can skip all idle CPU
@@ -923,9 +908,7 @@ static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_f
923908
cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
924909
allowed ?: p->cpus_ptr, flags);
925910
}
926-
#else
927-
cpu = -EBUSY;
928-
#endif
911+
929912
if (scx_kf_allowed_if_unlocked())
930913
task_rq_unlock(rq, p, &rf);
931914

@@ -1016,11 +999,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
1016999
if (node < 0)
10171000
return cpu_none_mask;
10181001

1019-
#ifdef CONFIG_SMP
10201002
return idle_cpumask(node)->cpu;
1021-
#else
1022-
return cpu_none_mask;
1023-
#endif
10241003
}
10251004

10261005
/**
@@ -1040,11 +1019,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
10401019
if (!check_builtin_idle_enabled())
10411020
return cpu_none_mask;
10421021

1043-
#ifdef CONFIG_SMP
10441022
return idle_cpumask(NUMA_NO_NODE)->cpu;
1045-
#else
1046-
return cpu_none_mask;
1047-
#endif
10481023
}
10491024

10501025
/**
@@ -1063,14 +1038,10 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
10631038
if (node < 0)
10641039
return cpu_none_mask;
10651040

1066-
#ifdef CONFIG_SMP
10671041
if (sched_smt_active())
10681042
return idle_cpumask(node)->smt;
10691043
else
10701044
return idle_cpumask(node)->cpu;
1071-
#else
1072-
return cpu_none_mask;
1073-
#endif
10741045
}
10751046

10761047
/**
@@ -1091,14 +1062,10 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
10911062
if (!check_builtin_idle_enabled())
10921063
return cpu_none_mask;
10931064

1094-
#ifdef CONFIG_SMP
10951065
if (sched_smt_active())
10961066
return idle_cpumask(NUMA_NO_NODE)->smt;
10971067
else
10981068
return idle_cpumask(NUMA_NO_NODE)->cpu;
1099-
#else
1100-
return cpu_none_mask;
1101-
#endif
11021069
}
11031070

11041071
/**

0 commit comments

Comments (0)