@@ -1922,12 +1922,6 @@ static void compute_partition_effective_cpumask(struct cpuset *cs,
 	rcu_read_unlock();
 }
 
-/*
- * update_cpumasks_hier() flags
- */
-#define HIER_CHECKALL		0x01	/* Check all cpusets with no skipping */
-#define HIER_NO_SD_REBUILD	0x02	/* Don't rebuild sched domains */
-
 /*
  * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
  * @cs:  the cpuset to consider
@@ -1942,7 +1936,7 @@ static void compute_partition_effective_cpumask(struct cpuset *cs,
  * Called with cpuset_mutex held
  */
 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
-				 int flags)
+				 bool force)
 {
 	struct cpuset *cp;
 	struct cgroup_subsys_state *pos_css;
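Taken together with the removed #defines in the first hunk, this collapses a two-bit flag word into a single boolean. A minimal before/after sketch of the calling convention (illustrative only, arguments abbreviated):

	/* Before: bit flags, of which only two ever existed. */
	update_cpumasks_hier(cs, &tmp, HIER_CHECKALL);      /* re-check every descendant */
	update_cpumasks_hier(cs, &tmp, HIER_NO_SD_REBUILD); /* suppress sched-domain rebuild */

	/* After: one bool. Judging from the later hunks, rebuild suppression
	 * now goes through the remaining !force_sd_rebuild test instead. */
	update_cpumasks_hier(cs, &tmp, true);   /* force: re-check every descendant */
	update_cpumasks_hier(cs, &tmp, false);  /* allow the usual subtree skipping */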
@@ -2007,10 +2001,10 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 		 * Skip the whole subtree if
 		 * 1) the cpumask remains the same,
 		 * 2) has no partition root state,
-		 * 3) HIER_CHECKALL flag not set, and
+		 * 3) force flag not set, and
 		 * 4) for v2 load balance state same as its parent.
 		 */
-		if (!cp->partition_root_state && !(flags & HIER_CHECKALL) &&
+		if (!cp->partition_root_state && !force &&
 		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
 		    (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
 		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
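The four numbered conditions map one-to-one onto the rewritten test. Pulled out into a standalone predicate for readability (a hypothetical helper restating the hunk above, not part of this patch):

	/* Hypothetical restatement of the skip test, numbered as in the comment. */
	static bool can_skip_subtree(struct cpuset *cp, struct cpuset *parent,
				     struct tmpmasks *tmp, bool force)
	{
		return cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&	/* 1 */
		       !cp->partition_root_state &&				/* 2 */
		       !force &&						/* 3 */
		       (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||		/* 4 */
			(is_sched_load_balance(parent) == is_sched_load_balance(cp)));
	}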
@@ -2112,8 +2106,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 	}
 	rcu_read_unlock();
 
-	if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD) &&
-	    !force_sd_rebuild)
+	if (need_rebuild_sched_domains && !force_sd_rebuild)
 		rebuild_sched_domains_locked();
 }
 
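With the HIER_NO_SD_REBUILD clause gone, the rebuild gate reduces to a single check. An annotated restatement (assumption: force_sd_rebuild is a flag set elsewhere, outside this diff, to defer the rebuild to a later forced pass):

	/* Rebuild immediately only if the hierarchy walk changed something
	 * and no deferred rebuild has been requested elsewhere. */
	if (need_rebuild_sched_domains && !force_sd_rebuild)
		rebuild_sched_domains_locked();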
@@ -2141,9 +2134,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
 	 * directly.
 	 *
 	 * The update_cpumasks_hier() function may sleep. So we have to
-	 * release the RCU read lock before calling it. HIER_NO_SD_REBUILD
-	 * flag is used to suppress rebuild of sched domains as the callers
-	 * will take care of that.
+	 * release the RCU read lock before calling it.
 	 */
 	rcu_read_lock();
 	cpuset_for_each_child(sibling, pos_css, parent) {
@@ -2159,7 +2150,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
 			continue;
 
 		rcu_read_unlock();
-		update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD);
+		update_cpumasks_hier(sibling, tmp, false);
 		rcu_read_lock();
 		css_put(&sibling->css);
 	}
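Sibling updates previously rode on HIER_NO_SD_REBUILD; passing false here means they take the normal skip-aware walk, and any rebuild deferral must come from force_sd_rebuild. Annotated (the deferral path itself is an assumption, not shown in this diff):

	rcu_read_unlock();
	/* force=false: siblings need no forced re-check. Callers are
	 * presumably expected to set force_sd_rebuild when the sched-domain
	 * rebuild must be postponed (not visible in this hunk). */
	update_cpumasks_hier(sibling, tmp, false);
	rcu_read_lock();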
@@ -2179,7 +2170,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	struct tmpmasks tmp;
 	struct cpuset *parent = parent_cs(cs);
 	bool invalidate = false;
-	int hier_flags = 0;
+	bool force = false;
 	int old_prs = cs->partition_root_state;
 
 	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
@@ -2240,8 +2231,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	 * Check all the descendants in update_cpumasks_hier() if
 	 * effective_xcpus is to be changed.
 	 */
-	if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
-		hier_flags = HIER_CHECKALL;
+	force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
 
 	retval = validate_change(cs, trialcs);
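The conditional flag assignment becomes a direct boolean assignment; the identical transformation repeats in update_exclusive_cpumask() below. In isolation:

	/* Before: flag set only when the masks differ. */
	int hier_flags = 0;
	if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
		hier_flags = HIER_CHECKALL;

	/* After: the predicate itself is the value. */
	bool force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);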
@@ -2309,7 +2299,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	spin_unlock_irq(&callback_lock);
 
 	/* effective_cpus/effective_xcpus will be updated here */
-	update_cpumasks_hier(cs, &tmp, hier_flags);
+	update_cpumasks_hier(cs, &tmp, force);
 
 	/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
 	if (cs->partition_root_state)
@@ -2334,7 +2324,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	struct tmpmasks tmp;
 	struct cpuset *parent = parent_cs(cs);
 	bool invalidate = false;
-	int hier_flags = 0;
+	bool force = false;
 	int old_prs = cs->partition_root_state;
 
 	if (!*buf) {
@@ -2357,8 +2347,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	 * Check all the descendants in update_cpumasks_hier() if
 	 * effective_xcpus is to be changed.
 	 */
-	if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
-		hier_flags = HIER_CHECKALL;
+	force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
 
 	retval = validate_change(cs, trialcs);
 	if (retval)
@@ -2411,8 +2400,8 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	 * of the subtree when it is a valid partition root or effective_xcpus
 	 * is updated.
 	 */
-	if (is_partition_valid(cs) || hier_flags)
-		update_cpumasks_hier(cs, &tmp, hier_flags);
+	if (is_partition_valid(cs) || force)
+		update_cpumasks_hier(cs, &tmp, force);
 
 	/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
 	if (cs->partition_root_state)
@@ -2853,7 +2842,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
 		update_unbound_workqueue_cpumask(new_xcpus_state);
 
 	/* Force update if switching back to member */
-	update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
+	update_cpumasks_hier(cs, &tmpmask, !new_prs);
 
 	/* Update sched domains and load balance flag */
 	update_partition_sd_lb(cs, old_prs);
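The bare !new_prs works as the force flag because, in the kernel's cpuset state encoding, PRS_MEMBER is 0 (an assumption from the surrounding source, not shown in this diff), so the expression is true exactly when switching back to member:

	/* !new_prs <=> (new_prs == PRS_MEMBER): force the full re-check only
	 * when the cpuset drops back from partition root to plain member. */
	update_cpumasks_hier(cs, &tmpmask, !new_prs);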