
Commit 97c0054

willdeacon authored and Peter Zijlstra committed
cpuset: Cleanup cpuset_cpus_allowed_fallback() use in select_fallback_rq()
select_fallback_rq() only needs to recheck for an allowed CPU if the
affinity mask of the task has changed since the last check.

Return a 'bool' from cpuset_cpus_allowed_fallback() to indicate whether
the affinity mask was updated, and use this to elide the allowed check
when the mask has been left alone.

No functional change.

Suggested-by: Valentin Schneider <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Valentin Schneider <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 431c69f commit 97c0054
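
As a rough, standalone illustration of the pattern described in the commit message (have the "maybe update the allowed mask" helper return a bool so the caller can skip re-scanning when nothing changed), here is a minimal userspace C sketch. It is not kernel code; the names fake_task, maybe_widen_mask and pick_allowed_cpu are made up for the example.

/*
 * Minimal sketch of the pattern: the fallback helper reports via its
 * return value whether it actually changed the allowed mask, so the
 * caller only repeats the "is there an allowed CPU?" scan when the
 * mask really was updated.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct fake_task {
	bool allowed[NR_CPUS];	/* stand-in for a cpumask */
	bool restricted;	/* pretend cpuset-style restriction */
};

/* Returns true only if it actually widened the allowed mask. */
static bool maybe_widen_mask(struct fake_task *t)
{
	int cpu;

	if (!t->restricted)
		return false;	/* mask left alone */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		t->allowed[cpu] = true;
	t->restricted = false;
	return true;
}

static int pick_allowed_cpu(const struct fake_task *t)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (t->allowed[cpu])
			return cpu;
	return -1;		/* no allowed CPU found */
}

int main(void)
{
	struct fake_task t = { .allowed = { false }, .restricted = true };
	int cpu = pick_allowed_cpu(&t);

	/* Only rescan on the path where the helper changed something. */
	if (cpu < 0 && maybe_widen_mask(&t))
		cpu = pick_allowed_cpu(&t);

	printf("selected cpu: %d\n", cpu);
	return 0;
}

The point mirrored here is exactly what the diff below does in select_fallback_rq(): the second lookup only happens when cpuset_cpus_allowed_fallback() reports that it modified the task's affinity.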

3 files changed, 12 insertions(+), 6 deletions(-)

include/linux/cpuset.h

Lines changed: 3 additions & 2 deletions
@@ -59,7 +59,7 @@ extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_read_lock(void);
 extern void cpuset_read_unlock(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -188,8 +188,9 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 	cpumask_copy(mask, task_cpu_possible_mask(p));
 }

-static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
+	return false;
 }

 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)

kernel/cgroup/cpuset.c

Lines changed: 8 additions & 2 deletions
@@ -3327,17 +3327,22 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
  * which will not contain a sane cpumask during cases such as cpu hotplugging.
  * This is the absolute last resort for the scheduler and it is only used if
  * _every_ other avenue has been traveled.
+ *
+ * Returns true if the affinity of @tsk was changed, false otherwise.
  **/

-void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
 	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
 	const struct cpumask *cs_mask;
+	bool changed = false;

 	rcu_read_lock();
 	cs_mask = task_cs(tsk)->cpus_allowed;
-	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask))
+	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
 		do_set_cpus_allowed(tsk, cs_mask);
+		changed = true;
+	}
 	rcu_read_unlock();

 	/*
@@ -3357,6 +3362,7 @@ void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	 * select_fallback_rq() will fix things ups and set cpu_possible_mask
 	 * if required.
 	 */
+	return changed;
 }

 void __init cpuset_init_current_mems_allowed(void)

kernel/sched/core.c

Lines changed: 1 addition & 2 deletions
@@ -3141,8 +3141,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	/* No more Mr. Nice Guy. */
 	switch (state) {
 	case cpuset:
-		if (IS_ENABLED(CONFIG_CPUSETS)) {
-			cpuset_cpus_allowed_fallback(p);
+		if (cpuset_cpus_allowed_fallback(p)) {
 			state = possible;
 			break;
 		}