@@ -7895,6 +7895,22 @@ static int cpuset_cpu_inactive(unsigned int cpu)
 	return 0;
 }
 
+static inline void sched_smt_present_inc(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_inc_cpuslocked(&sched_smt_present);
+#endif
+}
+
+static inline void sched_smt_present_dec(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+}
+
 int sched_cpu_activate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -7906,13 +7922,10 @@ int sched_cpu_activate(unsigned int cpu)
 	 */
 	balance_push_set(cpu, false);
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going up, increment the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-		static_branch_inc_cpuslocked(&sched_smt_present);
-#endif
+	sched_smt_present_inc(cpu);
 	set_cpu_active(cpu, true);
 
 	if (sched_smp_initialized) {
@@ -7981,13 +7994,12 @@ int sched_cpu_deactivate(unsigned int cpu)
 	}
 	rq_unlock_irqrestore(rq, &rf);
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going down, decrement the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-		static_branch_dec_cpuslocked(&sched_smt_present);
+	sched_smt_present_dec(cpu);
 
+#ifdef CONFIG_SCHED_SMT
 	sched_core_cpu_deactivate(cpu);
 #endif
 
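The change factors the CONFIG_SCHED_SMT bookkeeping out of sched_cpu_activate() and sched_cpu_deactivate() into a matched pair of helpers, so the "second thread of a core comes online / goes offline" test lives in one place. Below is a minimal userspace sketch of the same pattern, not kernel code: smt_sibling_count() and the plain smt_present counter are hypothetical stand-ins for cpumask_weight(cpu_smt_mask(cpu)) and the sched_smt_present static branch.

/* Compile-and-run illustration only; the stubs below are assumptions,
 * not kernel APIs. */
#include <stdio.h>

#define CONFIG_SCHED_SMT

static int smt_present;			/* stand-in for the sched_smt_present static branch */

static int smt_sibling_count(int cpu)	/* stand-in for cpumask_weight(cpu_smt_mask(cpu)) */
{
	(void)cpu;
	return 2;			/* pretend every core exposes two threads */
}

static inline void sched_smt_present_inc(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	/* In the kernel the weight reaches 2 only when a core's second
	 * thread comes online; the stub above always reports two. */
	if (smt_sibling_count(cpu) == 2)
		smt_present++;
#endif
}

static inline void sched_smt_present_dec(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	if (smt_sibling_count(cpu) == 2)
		smt_present--;
#endif
}

int main(void)
{
	sched_smt_present_inc(1);	/* CPU 1 comes online  */
	printf("after online:  smt_present = %d\n", smt_present);
	sched_smt_present_dec(1);	/* CPU 1 goes offline  */
	printf("after offline: smt_present = %d\n", smt_present);
	return 0;
}

A symmetric inc/dec pair like this also lets an error path undo the accounting without repeating the #ifdef block at each call site.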