Skip to content

Commit 381b53c

Browse files
Chen Ridong authored and Tejun Heo (htejun) committed
cgroup/cpuset: rename functions shared between v1 and v2
Some function names declared in cpuset-internal.h are generic. To avoid conflicting with other variables of the same name, rename these functions with a cpuset_/cpuset1_ prefix to make them unique to cpuset. Signed-off-by: Chen Ridong <[email protected]> Acked-by: Waiman Long <[email protected]> Signed-off-by: Tejun Heo <[email protected]>
1 parent b0ced9d commit 381b53c

File tree

3 files changed

+56
-56
lines changed

3 files changed

+56
-56
lines changed

kernel/cgroup/cpuset-internal.h

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -267,26 +267,26 @@ static inline int is_spread_slab(const struct cpuset *cs)
267267
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
268268

269269
void rebuild_sched_domains_locked(void);
270-
void callback_lock_irq(void);
271-
void callback_unlock_irq(void);
272-
void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus);
273-
void update_tasks_nodemask(struct cpuset *cs);
274-
int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on);
270+
void cpuset_callback_lock_irq(void);
271+
void cpuset_callback_unlock_irq(void);
272+
void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus);
273+
void cpuset_update_tasks_nodemask(struct cpuset *cs);
274+
int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on);
275275
ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
276276
char *buf, size_t nbytes, loff_t off);
277277
int cpuset_common_seq_show(struct seq_file *sf, void *v);
278278

279279
/*
280280
* cpuset-v1.c
281281
*/
282-
extern struct cftype legacy_files[];
282+
extern struct cftype cpuset1_files[];
283283
void fmeter_init(struct fmeter *fmp);
284-
void cpuset_update_task_spread_flags(struct cpuset *cs,
284+
void cpuset1_update_task_spread_flags(struct cpuset *cs,
285285
struct task_struct *tsk);
286-
void update_tasks_flags(struct cpuset *cs);
287-
void hotplug_update_tasks_legacy(struct cpuset *cs,
286+
void cpuset1_update_tasks_flags(struct cpuset *cs);
287+
void cpuset1_hotplug_update_tasks(struct cpuset *cs,
288288
struct cpumask *new_cpus, nodemask_t *new_mems,
289289
bool cpus_updated, bool mems_updated);
290-
int validate_change_legacy(struct cpuset *cur, struct cpuset *trial);
290+
int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
291291

292292
#endif /* __CPUSET_INTERNAL_H */

kernel/cgroup/cpuset-v1.c

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -209,7 +209,7 @@ static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
209209
* Call with callback_lock or cpuset_mutex held. The check can be skipped
210210
* if on default hierarchy.
211211
*/
212-
void cpuset_update_task_spread_flags(struct cpuset *cs,
212+
void cpuset1_update_task_spread_flags(struct cpuset *cs,
213213
struct task_struct *tsk)
214214
{
215215
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
@@ -227,21 +227,21 @@ void cpuset_update_task_spread_flags(struct cpuset *cs,
227227
}
228228

229229
/**
230-
* update_tasks_flags - update the spread flags of tasks in the cpuset.
230+
* cpuset1_update_tasks_flags - update the spread flags of tasks in the cpuset.
231231
* @cs: the cpuset in which each task's spread flags needs to be changed
232232
*
233233
* Iterate through each task of @cs updating its spread flags. As this
234234
* function is called with cpuset_mutex held, cpuset membership stays
235235
* stable.
236236
*/
237-
void update_tasks_flags(struct cpuset *cs)
237+
void cpuset1_update_tasks_flags(struct cpuset *cs)
238238
{
239239
struct css_task_iter it;
240240
struct task_struct *task;
241241

242242
css_task_iter_start(&cs->css, 0, &it);
243243
while ((task = css_task_iter_next(&it)))
244-
cpuset_update_task_spread_flags(cs, task);
244+
cpuset1_update_task_spread_flags(cs, task);
245245
css_task_iter_end(&it);
246246
}
247247

@@ -282,27 +282,27 @@ static void cpuset_migrate_tasks_workfn(struct work_struct *work)
282282
kfree(s);
283283
}
284284

285-
void hotplug_update_tasks_legacy(struct cpuset *cs,
285+
void cpuset1_hotplug_update_tasks(struct cpuset *cs,
286286
struct cpumask *new_cpus, nodemask_t *new_mems,
287287
bool cpus_updated, bool mems_updated)
288288
{
289289
bool is_empty;
290290

291-
callback_lock_irq();
291+
cpuset_callback_lock_irq();
292292
cpumask_copy(cs->cpus_allowed, new_cpus);
293293
cpumask_copy(cs->effective_cpus, new_cpus);
294294
cs->mems_allowed = *new_mems;
295295
cs->effective_mems = *new_mems;
296-
callback_unlock_irq();
296+
cpuset_callback_unlock_irq();
297297

298298
/*
299-
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
299+
* Don't call cpuset_update_tasks_cpumask() if the cpuset becomes empty,
300300
* as the tasks will be migrated to an ancestor.
301301
*/
302302
if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
303-
update_tasks_cpumask(cs, new_cpus);
303+
cpuset_update_tasks_cpumask(cs, new_cpus);
304304
if (mems_updated && !nodes_empty(cs->mems_allowed))
305-
update_tasks_nodemask(cs);
305+
cpuset_update_tasks_nodemask(cs);
306306

307307
is_empty = cpumask_empty(cs->cpus_allowed) ||
308308
nodes_empty(cs->mems_allowed);
@@ -345,10 +345,10 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
345345
}
346346

347347
/*
348-
* validate_change_legacy() - Validate conditions specific to legacy (v1)
348+
* cpuset1_validate_change() - Validate conditions specific to legacy (v1)
349349
* behavior.
350350
*/
351-
int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
351+
int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial)
352352
{
353353
struct cgroup_subsys_state *css;
354354
struct cpuset *c, *par;
@@ -421,28 +421,28 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
421421

422422
switch (type) {
423423
case FILE_CPU_EXCLUSIVE:
424-
retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
424+
retval = cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, val);
425425
break;
426426
case FILE_MEM_EXCLUSIVE:
427-
retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
427+
retval = cpuset_update_flag(CS_MEM_EXCLUSIVE, cs, val);
428428
break;
429429
case FILE_MEM_HARDWALL:
430-
retval = update_flag(CS_MEM_HARDWALL, cs, val);
430+
retval = cpuset_update_flag(CS_MEM_HARDWALL, cs, val);
431431
break;
432432
case FILE_SCHED_LOAD_BALANCE:
433-
retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
433+
retval = cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
434434
break;
435435
case FILE_MEMORY_MIGRATE:
436-
retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
436+
retval = cpuset_update_flag(CS_MEMORY_MIGRATE, cs, val);
437437
break;
438438
case FILE_MEMORY_PRESSURE_ENABLED:
439439
cpuset_memory_pressure_enabled = !!val;
440440
break;
441441
case FILE_SPREAD_PAGE:
442-
retval = update_flag(CS_SPREAD_PAGE, cs, val);
442+
retval = cpuset_update_flag(CS_SPREAD_PAGE, cs, val);
443443
break;
444444
case FILE_SPREAD_SLAB:
445-
retval = update_flag(CS_SPREAD_SLAB, cs, val);
445+
retval = cpuset_update_flag(CS_SPREAD_SLAB, cs, val);
446446
break;
447447
default:
448448
retval = -EINVAL;
@@ -458,7 +458,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
458458
* for the common functions, 'private' gives the type of file
459459
*/
460460

461-
struct cftype legacy_files[] = {
461+
struct cftype cpuset1_files[] = {
462462
{
463463
.name = "cpus",
464464
.seq_show = cpuset_common_seq_show,

kernel/cgroup/cpuset.c

Lines changed: 26 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -239,12 +239,12 @@ void cpuset_unlock(void)
239239

240240
static DEFINE_SPINLOCK(callback_lock);
241241

242-
void callback_lock_irq(void)
242+
void cpuset_callback_lock_irq(void)
243243
{
244244
spin_lock_irq(&callback_lock);
245245
}
246246

247-
void callback_unlock_irq(void)
247+
void cpuset_callback_unlock_irq(void)
248248
{
249249
spin_unlock_irq(&callback_lock);
250250
}
@@ -540,7 +540,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
540540
rcu_read_lock();
541541

542542
if (!is_in_v2_mode())
543-
ret = validate_change_legacy(cur, trial);
543+
ret = cpuset1_validate_change(cur, trial);
544544
if (ret)
545545
goto out;
546546

@@ -1053,7 +1053,7 @@ void rebuild_sched_domains(void)
10531053
}
10541054

10551055
/**
1056-
* update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1056+
* cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
10571057
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
10581058
* @new_cpus: the temp variable for the new effective_cpus mask
10591059
*
@@ -1063,7 +1063,7 @@ void rebuild_sched_domains(void)
10631063
* is used instead of effective_cpus to make sure all offline CPUs are also
10641064
* included as hotplug code won't update cpumasks for tasks in top_cpuset.
10651065
*/
1066-
void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1066+
void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
10671067
{
10681068
struct css_task_iter it;
10691069
struct task_struct *task;
@@ -1126,11 +1126,11 @@ static int update_partition_exclusive(struct cpuset *cs, int new_prs)
11261126
bool exclusive = (new_prs > PRS_MEMBER);
11271127

11281128
if (exclusive && !is_cpu_exclusive(cs)) {
1129-
if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1129+
if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
11301130
return PERR_NOTEXCL;
11311131
} else if (!exclusive && is_cpu_exclusive(cs)) {
11321132
/* Turning off CS_CPU_EXCLUSIVE will not return error */
1133-
update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1133+
cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
11341134
}
11351135
return 0;
11361136
}
@@ -1380,7 +1380,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
13801380
/*
13811381
* Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
13821382
*/
1383-
update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1383+
cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
13841384
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
13851385
return 0;
13861386
}
@@ -1416,7 +1416,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
14161416
/*
14171417
* Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
14181418
*/
1419-
update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1419+
cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
14201420
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
14211421
}
14221422

@@ -1468,7 +1468,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
14681468
/*
14691469
* Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
14701470
*/
1471-
update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1471+
cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
14721472
update_sibling_cpumasks(&top_cpuset, NULL, tmp);
14731473
return;
14741474

@@ -1840,7 +1840,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
18401840
update_partition_exclusive(cs, new_prs);
18411841

18421842
if (adding || deleting) {
1843-
update_tasks_cpumask(parent, tmp->addmask);
1843+
cpuset_update_tasks_cpumask(parent, tmp->addmask);
18441844
update_sibling_cpumasks(parent, cs, tmp);
18451845
}
18461846

@@ -2023,7 +2023,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
20232023
/*
20242024
* update_parent_effective_cpumask() should have been called
20252025
* for cs already in update_cpumask(). We should also call
2026-
* update_tasks_cpumask() again for tasks in the parent
2026+
* cpuset_update_tasks_cpumask() again for tasks in the parent
20272027
* cpuset if the parent's effective_cpus changes.
20282028
*/
20292029
if ((cp != cs) && old_prs) {
@@ -2080,7 +2080,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
20802080
WARN_ON(!is_in_v2_mode() &&
20812081
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
20822082

2083-
update_tasks_cpumask(cp, cp->effective_cpus);
2083+
cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
20842084

20852085
/*
20862086
* On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
@@ -2507,14 +2507,14 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
25072507
static void *cpuset_being_rebound;
25082508

25092509
/**
2510-
* update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2510+
* cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
25112511
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
25122512
*
25132513
* Iterate through each task of @cs updating its mems_allowed to the
25142514
* effective cpuset's. As this function is called with cpuset_mutex held,
25152515
* cpuset membership stays stable.
25162516
*/
2517-
void update_tasks_nodemask(struct cpuset *cs)
2517+
void cpuset_update_tasks_nodemask(struct cpuset *cs)
25182518
{
25192519
static nodemask_t newmems; /* protected by cpuset_mutex */
25202520
struct css_task_iter it;
@@ -2612,7 +2612,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
26122612
WARN_ON(!is_in_v2_mode() &&
26132613
!nodes_equal(cp->mems_allowed, cp->effective_mems));
26142614

2615-
update_tasks_nodemask(cp);
2615+
cpuset_update_tasks_nodemask(cp);
26162616

26172617
rcu_read_lock();
26182618
css_put(&cp->css);
@@ -2699,15 +2699,15 @@ bool current_cpuset_is_being_rebound(void)
26992699
}
27002700

27012701
/*
2702-
* update_flag - read a 0 or a 1 in a file and update associated flag
2702+
* cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
27032703
* bit: the bit to update (see cpuset_flagbits_t)
27042704
* cs: the cpuset to update
27052705
* turning_on: whether the flag is being set or cleared
27062706
*
27072707
* Call with cpuset_mutex held.
27082708
*/
27092709

2710-
int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2710+
int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
27112711
int turning_on)
27122712
{
27132713
struct cpuset *trialcs;
@@ -2743,7 +2743,7 @@ int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
27432743
rebuild_sched_domains_locked();
27442744

27452745
if (spread_flag_changed)
2746-
update_tasks_flags(cs);
2746+
cpuset1_update_tasks_flags(cs);
27472747
out:
27482748
free_cpuset(trialcs);
27492749
return err;
@@ -3008,7 +3008,7 @@ static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
30083008
WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
30093009

30103010
cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
3011-
cpuset_update_task_spread_flags(cs, task);
3011+
cpuset1_update_task_spread_flags(cs, task);
30123012
}
30133013

30143014
static void cpuset_attach(struct cgroup_taskset *tset)
@@ -3484,7 +3484,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
34843484

34853485
if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
34863486
is_sched_load_balance(cs))
3487-
update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3487+
cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
34883488

34893489
cpuset_dec();
34903490
clear_bit(CS_ONLINE, &cs->flags);
@@ -3623,7 +3623,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
36233623
.can_fork = cpuset_can_fork,
36243624
.cancel_fork = cpuset_cancel_fork,
36253625
.fork = cpuset_fork,
3626-
.legacy_cftypes = legacy_files,
3626+
.legacy_cftypes = cpuset1_files,
36273627
.dfl_cftypes = dfl_files,
36283628
.early_init = true,
36293629
.threaded = true,
@@ -3683,9 +3683,9 @@ hotplug_update_tasks(struct cpuset *cs,
36833683
spin_unlock_irq(&callback_lock);
36843684

36853685
if (cpus_updated)
3686-
update_tasks_cpumask(cs, new_cpus);
3686+
cpuset_update_tasks_cpumask(cs, new_cpus);
36873687
if (mems_updated)
3688-
update_tasks_nodemask(cs);
3688+
cpuset_update_tasks_nodemask(cs);
36893689
}
36903690

36913691
void cpuset_force_rebuild(void)
@@ -3786,7 +3786,7 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
37863786
hotplug_update_tasks(cs, &new_cpus, &new_mems,
37873787
cpus_updated, mems_updated);
37883788
else
3789-
hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
3789+
cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
37903790
cpus_updated, mems_updated);
37913791

37923792
unlock:
@@ -3871,7 +3871,7 @@ static void cpuset_handle_hotplug(void)
38713871
top_cpuset.mems_allowed = new_mems;
38723872
top_cpuset.effective_mems = new_mems;
38733873
spin_unlock_irq(&callback_lock);
3874-
update_tasks_nodemask(&top_cpuset);
3874+
cpuset_update_tasks_nodemask(&top_cpuset);
38753875
}
38763876

38773877
mutex_unlock(&cpuset_mutex);

0 commit comments

Comments
 (0)