Skip to content

Commit b94f9ac

Browse files
Waiman-Long authored and htejun committed
cgroup/cpuset: Change references of cpuset_mutex to cpuset_rwsem
Since commit 1243dc5 ("cgroup/cpuset: Convert cpuset_mutex to percpu_rwsem"), cpuset_mutex has been replaced by cpuset_rwsem, which is a percpu rwsem. However, the comments in kernel/cgroup/cpuset.c still reference cpuset_mutex, which are now incorrect. Change all the references of cpuset_mutex to cpuset_rwsem.

Fixes: 1243dc5 ("cgroup/cpuset: Convert cpuset_mutex to percpu_rwsem")
Signed-off-by: Waiman Long <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>
1 parent 22b1255 commit b94f9ac

File tree

1 file changed

+29
-27
lines changed

1 file changed

+29
-27
lines changed

kernel/cgroup/cpuset.c

Lines changed: 29 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -311,17 +311,19 @@ static struct cpuset top_cpuset = {
311311
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
312312

313313
/*
314-
* There are two global locks guarding cpuset structures - cpuset_mutex and
314+
* There are two global locks guarding cpuset structures - cpuset_rwsem and
315315
* callback_lock. We also require taking task_lock() when dereferencing a
316316
* task's cpuset pointer. See "The task_lock() exception", at the end of this
317-
* comment.
317+
* comment. The cpuset code uses only cpuset_rwsem write lock. Other
318+
* kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
319+
* prevent change to cpuset structures.
318320
*
319321
* A task must hold both locks to modify cpusets. If a task holds
320-
* cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
322+
* cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
321323
* is the only task able to also acquire callback_lock and be able to
322324
* modify cpusets. It can perform various checks on the cpuset structure
323325
* first, knowing nothing will change. It can also allocate memory while
324-
* just holding cpuset_mutex. While it is performing these checks, various
326+
* just holding cpuset_rwsem. While it is performing these checks, various
325327
* callback routines can briefly acquire callback_lock to query cpusets.
326328
* Once it is ready to make the changes, it takes callback_lock, blocking
327329
* everyone else.
@@ -393,7 +395,7 @@ static inline bool is_in_v2_mode(void)
393395
* One way or another, we guarantee to return some non-empty subset
394396
* of cpu_online_mask.
395397
*
396-
* Call with callback_lock or cpuset_mutex held.
398+
* Call with callback_lock or cpuset_rwsem held.
397399
*/
398400
static void guarantee_online_cpus(struct task_struct *tsk,
399401
struct cpumask *pmask)
@@ -435,7 +437,7 @@ static void guarantee_online_cpus(struct task_struct *tsk,
435437
* One way or another, we guarantee to return some non-empty subset
436438
* of node_states[N_MEMORY].
437439
*
438-
* Call with callback_lock or cpuset_mutex held.
440+
* Call with callback_lock or cpuset_rwsem held.
439441
*/
440442
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
441443
{
@@ -447,7 +449,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
447449
/*
448450
* update task's spread flag if cpuset's page/slab spread flag is set
449451
*
450-
* Call with callback_lock or cpuset_mutex held.
452+
* Call with callback_lock or cpuset_rwsem held.
451453
*/
452454
static void cpuset_update_task_spread_flag(struct cpuset *cs,
453455
struct task_struct *tsk)
@@ -468,7 +470,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
468470
*
469471
* One cpuset is a subset of another if all its allowed CPUs and
470472
* Memory Nodes are a subset of the other, and its exclusive flags
471-
* are only set if the other's are set. Call holding cpuset_mutex.
473+
* are only set if the other's are set. Call holding cpuset_rwsem.
472474
*/
473475

474476
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -577,7 +579,7 @@ static inline void free_cpuset(struct cpuset *cs)
577579
* If we replaced the flag and mask values of the current cpuset
578580
* (cur) with those values in the trial cpuset (trial), would
579581
* our various subset and exclusive rules still be valid? Presumes
580-
* cpuset_mutex held.
582+
* cpuset_rwsem held.
581583
*
582584
* 'cur' is the address of an actual, in-use cpuset. Operations
583585
* such as list traversal that depend on the actual address of the
@@ -700,7 +702,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
700702
rcu_read_unlock();
701703
}
702704

703-
/* Must be called with cpuset_mutex held. */
705+
/* Must be called with cpuset_rwsem held. */
704706
static inline int nr_cpusets(void)
705707
{
706708
/* jump label reference count + the top-level cpuset */
@@ -726,7 +728,7 @@ static inline int nr_cpusets(void)
726728
* domains when operating in the severe memory shortage situations
727729
* that could cause allocation failures below.
728730
*
729-
* Must be called with cpuset_mutex held.
731+
* Must be called with cpuset_rwsem held.
730732
*
731733
* The three key local variables below are:
732734
* cp - cpuset pointer, used (together with pos_css) to perform a
@@ -1005,7 +1007,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
10051007
* 'cpus' is removed, then call this routine to rebuild the
10061008
* scheduler's dynamic sched domains.
10071009
*
1008-
* Call with cpuset_mutex held. Takes cpus_read_lock().
1010+
* Call with cpuset_rwsem held. Takes cpus_read_lock().
10091011
*/
10101012
static void rebuild_sched_domains_locked(void)
10111013
{
@@ -1078,7 +1080,7 @@ void rebuild_sched_domains(void)
10781080
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
10791081
*
10801082
* Iterate through each task of @cs updating its cpus_allowed to the
1081-
* effective cpuset's. As this function is called with cpuset_mutex held,
1083+
* effective cpuset's. As this function is called with cpuset_rwsem held,
10821084
* cpuset membership stays stable.
10831085
*/
10841086
static void update_tasks_cpumask(struct cpuset *cs)
@@ -1347,7 +1349,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
13471349
*
13481350
* On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
13491351
*
1350-
* Called with cpuset_mutex held
1352+
* Called with cpuset_rwsem held
13511353
*/
13521354
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
13531355
{
@@ -1704,12 +1706,12 @@ static void *cpuset_being_rebound;
17041706
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
17051707
*
17061708
* Iterate through each task of @cs updating its mems_allowed to the
1707-
* effective cpuset's. As this function is called with cpuset_mutex held,
1709+
* effective cpuset's. As this function is called with cpuset_rwsem held,
17081710
* cpuset membership stays stable.
17091711
*/
17101712
static void update_tasks_nodemask(struct cpuset *cs)
17111713
{
1712-
static nodemask_t newmems; /* protected by cpuset_mutex */
1714+
static nodemask_t newmems; /* protected by cpuset_rwsem */
17131715
struct css_task_iter it;
17141716
struct task_struct *task;
17151717

@@ -1722,7 +1724,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
17221724
* take while holding tasklist_lock. Forks can happen - the
17231725
* mpol_dup() cpuset_being_rebound check will catch such forks,
17241726
* and rebind their vma mempolicies too. Because we still hold
1725-
* the global cpuset_mutex, we know that no other rebind effort
1727+
* the global cpuset_rwsem, we know that no other rebind effort
17261728
* will be contending for the global variable cpuset_being_rebound.
17271729
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
17281730
* is idempotent. Also migrate pages in each mm to new nodes.
@@ -1768,7 +1770,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
17681770
*
17691771
* On legacy hierarchy, effective_mems will be the same with mems_allowed.
17701772
*
1771-
* Called with cpuset_mutex held
1773+
* Called with cpuset_rwsem held
17721774
*/
17731775
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
17741776
{
@@ -1821,7 +1823,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
18211823
* mempolicies and if the cpuset is marked 'memory_migrate',
18221824
* migrate the tasks pages to the new memory.
18231825
*
1824-
* Call with cpuset_mutex held. May take callback_lock during call.
1826+
* Call with cpuset_rwsem held. May take callback_lock during call.
18251827
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
18261828
* lock each such tasks mm->mmap_lock, scan its vma's and rebind
18271829
* their mempolicies to the cpusets new mems_allowed.
@@ -1911,7 +1913,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
19111913
* @cs: the cpuset in which each task's spread flags needs to be changed
19121914
*
19131915
* Iterate through each task of @cs updating its spread flags. As this
1914-
* function is called with cpuset_mutex held, cpuset membership stays
1916+
* function is called with cpuset_rwsem held, cpuset membership stays
19151917
* stable.
19161918
*/
19171919
static void update_tasks_flags(struct cpuset *cs)
@@ -1931,7 +1933,7 @@ static void update_tasks_flags(struct cpuset *cs)
19311933
* cs: the cpuset to update
19321934
* turning_on: whether the flag is being set or cleared
19331935
*
1934-
* Call with cpuset_mutex held.
1936+
* Call with cpuset_rwsem held.
19351937
*/
19361938

19371939
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
@@ -1980,7 +1982,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
19801982
* cs: the cpuset to update
19811983
* new_prs: new partition root state
19821984
*
1983-
* Call with cpuset_mutex held.
1985+
* Call with cpuset_rwsem held.
19841986
*/
19851987
static int update_prstate(struct cpuset *cs, int new_prs)
19861988
{
@@ -2167,7 +2169,7 @@ static int fmeter_getrate(struct fmeter *fmp)
21672169

21682170
static struct cpuset *cpuset_attach_old_cs;
21692171

2170-
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2172+
/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
21712173
static int cpuset_can_attach(struct cgroup_taskset *tset)
21722174
{
21732175
struct cgroup_subsys_state *css;
@@ -2219,15 +2221,15 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
22192221
}
22202222

22212223
/*
2222-
* Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
2224+
* Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach()
22232225
* but we can't allocate it dynamically there. Define it global and
22242226
* allocate from cpuset_init().
22252227
*/
22262228
static cpumask_var_t cpus_attach;
22272229

22282230
static void cpuset_attach(struct cgroup_taskset *tset)
22292231
{
2230-
/* static buf protected by cpuset_mutex */
2232+
/* static buf protected by cpuset_rwsem */
22312233
static nodemask_t cpuset_attach_nodemask_to;
22322234
struct task_struct *task;
22332235
struct task_struct *leader;
@@ -2417,7 +2419,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
24172419
* operation like this one can lead to a deadlock through kernfs
24182420
* active_ref protection. Let's break the protection. Losing the
24192421
* protection is okay as we check whether @cs is online after
2420-
* grabbing cpuset_mutex anyway. This only happens on the legacy
2422+
* grabbing cpuset_rwsem anyway. This only happens on the legacy
24212423
* hierarchies.
24222424
*/
24232425
css_get(&cs->css);
@@ -3672,7 +3674,7 @@ void __cpuset_memory_pressure_bump(void)
36723674
* - Used for /proc/<pid>/cpuset.
36733675
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
36743676
* doesn't really matter if tsk->cpuset changes after we read it,
3675-
* and we take cpuset_mutex, keeping cpuset_attach() from changing it
3677+
* and we take cpuset_rwsem, keeping cpuset_attach() from changing it
36763678
* anyway.
36773679
*/
36783680
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,

0 commit comments

Comments (0)