@@ -293,6 +293,12 @@ static inline void dec_attach_in_progress(struct cpuset *cs)
 	mutex_unlock(&cpuset_mutex);
 }
 
+static inline bool cpuset_v2(void)
+{
+	return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
+		cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
+}
+
 /*
  * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
  * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
@@ -303,7 +309,7 @@ static inline void dec_attach_in_progress(struct cpuset *cs)
  */
 static inline bool is_in_v2_mode(void)
 {
-	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
+	return cpuset_v2() ||
 	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
 }
 
@@ -738,7 +744,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
 	int nslot;		/* next empty doms[] struct cpumask slot */
 	struct cgroup_subsys_state *pos_css;
 	bool root_load_balance = is_sched_load_balance(&top_cpuset);
-	bool cgrpv2 = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
+	bool cgrpv2 = cpuset_v2();
 	int nslot_update;
 
 	doms = NULL;
@@ -1198,7 +1204,7 @@ static void reset_partition_data(struct cpuset *cs)
 {
 	struct cpuset *parent = parent_cs(cs);
 
-	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
+	if (!cpuset_v2())
 		return;
 
 	lockdep_assert_held(&callback_lock);
@@ -2017,7 +2023,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 		 */
 		if (!cp->partition_root_state && !force &&
 		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
-		    (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
+		    (!cpuset_v2() ||
 		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
 			pos_css = css_rightmost_descendant(pos_css);
 			continue;
@@ -2091,8 +2097,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 		 * from parent if current cpuset isn't a valid partition root
 		 * and their load balance states differ.
 		 */
-		if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
-		    !is_partition_valid(cp) &&
+		if (cpuset_v2() && !is_partition_valid(cp) &&
 		    (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
 			if (is_sched_load_balance(parent))
 				set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
@@ -2108,8 +2113,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 		 */
 		if (!cpumask_empty(cp->cpus_allowed) &&
 		    is_sched_load_balance(cp) &&
-		    (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
-		     is_partition_valid(cp)))
+		    (!cpuset_v2() || is_partition_valid(cp)))
 			need_rebuild_sched_domains = true;
 
 		rcu_read_lock();
@@ -2246,7 +2250,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 
 	retval = validate_change(cs, trialcs);
 
-	if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
+	if ((retval == -EINVAL) && cpuset_v2()) {
 		struct cgroup_subsys_state *css;
 		struct cpuset *cp;
 
@@ -2738,8 +2742,7 @@ int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	spin_unlock_irq(&callback_lock);
 
 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
-		if (!IS_ENABLED(CONFIG_CPUSETS_V1) ||
-		    cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
+		if (cpuset_v2())
 			cpuset_force_rebuild();
 		else
 			rebuild_sched_domains_locked();
@@ -2925,8 +2928,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
 	 * migration permission derives from hierarchy ownership in
 	 * cgroup_procs_write_permission()).
 	 */
-	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
-	    (cpus_updated || mems_updated)) {
+	if (!cpuset_v2() || (cpus_updated || mems_updated)) {
 		ret = security_task_setscheduler(task);
 		if (ret)
 			goto out_unlock;
@@ -3040,8 +3042,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 	 * in effective cpus and mems. In that case, we can optimize out
 	 * by skipping the task iteration and update.
 	 */
-	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
-	    !cpus_updated && !mems_updated) {
+	if (cpuset_v2() && !cpus_updated && !mems_updated) {
 		cpuset_attach_nodemask_to = cs->effective_mems;
 		goto out;
 	}
@@ -3391,7 +3392,7 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
 	INIT_LIST_HEAD(&cs->remote_sibling);
 
 	/* Set CS_MEMORY_MIGRATE for default hierarchy */
-	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
+	if (cpuset_v2())
 		__set_bit(CS_MEMORY_MIGRATE, &cs->flags);
 
 	return &cs->css;
@@ -3418,8 +3419,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 	/*
 	 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
 	 */
-	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
-	    !is_sched_load_balance(parent))
+	if (cpuset_v2() && !is_sched_load_balance(parent))
 		clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 
 	cpuset_inc();
@@ -3489,8 +3489,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
 	if (is_partition_valid(cs))
 		update_prstate(cs, 0);
 
-	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
-	    is_sched_load_balance(cs))
+	if (!cpuset_v2() && is_sched_load_balance(cs))
 		cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
 	cpuset_dec();