
Commit db3b02a

willdeacon authored and Peter Zijlstra committed
sched: Split the guts of sched_setaffinity() into a helper function
In preparation for replaying user affinity requests using a saved mask, split sched_setaffinity() up so that the initial task lookup and security checks are only performed when the request is coming directly from userspace.

Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Valentin Schneider <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent b90ca8b commit db3b02a

File tree: 1 file changed (+57, -48 lines)


kernel/sched/core.c

Lines changed: 57 additions & 48 deletions
@@ -7594,53 +7594,22 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	return retval;
 }
 
-long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
 {
-	cpumask_var_t cpus_allowed, new_mask;
-	struct task_struct *p;
 	int retval;
+	cpumask_var_t cpus_allowed, new_mask;
 
-	rcu_read_lock();
-
-	p = find_process_by_pid(pid);
-	if (!p) {
-		rcu_read_unlock();
-		return -ESRCH;
-	}
-
-	/* Prevent p going away */
-	get_task_struct(p);
-	rcu_read_unlock();
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
+		return -ENOMEM;
 
-	if (p->flags & PF_NO_SETAFFINITY) {
-		retval = -EINVAL;
-		goto out_put_task;
-	}
-	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
-		retval = -ENOMEM;
-		goto out_put_task;
-	}
 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
 		retval = -ENOMEM;
 		goto out_free_cpus_allowed;
 	}
-	retval = -EPERM;
-	if (!check_same_owner(p)) {
-		rcu_read_lock();
-		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
-			rcu_read_unlock();
-			goto out_free_new_mask;
-		}
-		rcu_read_unlock();
-	}
-
-	retval = security_task_setscheduler(p);
-	if (retval)
-		goto out_free_new_mask;
-
 
 	cpuset_cpus_allowed(p, cpus_allowed);
-	cpumask_and(new_mask, in_mask, cpus_allowed);
+	cpumask_and(new_mask, mask, cpus_allowed);
 
 	/*
 	 * Since bandwidth control happens on root_domain basis,
@@ -7661,23 +7630,63 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 #endif
 again:
 	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
+	if (retval)
+		goto out_free_new_mask;
 
-	if (!retval) {
-		cpuset_cpus_allowed(p, cpus_allowed);
-		if (!cpumask_subset(new_mask, cpus_allowed)) {
-			/*
-			 * We must have raced with a concurrent cpuset
-			 * update. Just reset the cpus_allowed to the
-			 * cpuset's cpus_allowed
-			 */
-			cpumask_copy(new_mask, cpus_allowed);
-			goto again;
-		}
+	cpuset_cpus_allowed(p, cpus_allowed);
+	if (!cpumask_subset(new_mask, cpus_allowed)) {
+		/*
+		 * We must have raced with a concurrent cpuset update.
+		 * Just reset the cpumask to the cpuset's cpus_allowed.
+		 */
+		cpumask_copy(new_mask, cpus_allowed);
+		goto again;
 	}
+
 out_free_new_mask:
 	free_cpumask_var(new_mask);
 out_free_cpus_allowed:
 	free_cpumask_var(cpus_allowed);
+	return retval;
+}
+
+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{
+	struct task_struct *p;
+	int retval;
+
+	rcu_read_lock();
+
+	p = find_process_by_pid(pid);
+	if (!p) {
+		rcu_read_unlock();
+		return -ESRCH;
+	}
+
+	/* Prevent p going away */
+	get_task_struct(p);
+	rcu_read_unlock();
+
+	if (p->flags & PF_NO_SETAFFINITY) {
+		retval = -EINVAL;
+		goto out_put_task;
+	}
+
+	if (!check_same_owner(p)) {
+		rcu_read_lock();
+		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
+			rcu_read_unlock();
+			retval = -EPERM;
+			goto out_put_task;
+		}
+		rcu_read_unlock();
+	}
+
+	retval = security_task_setscheduler(p);
+	if (retval)
+		goto out_put_task;
+
+	retval = __sched_setaffinity(p, in_mask);
 out_put_task:
 	put_task_struct(p);
 	return retval;
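
With the guts split out, an in-kernel caller that already holds a reference to the task can apply a saved mask without repeating the pid lookup and the ownership/security checks that sched_setaffinity() still performs for userspace requests. Below is a minimal sketch of such a caller under stated assumptions: the function name replay_saved_affinity() is purely illustrative and is not added by this commit, and since __sched_setaffinity() is static it would have to live in kernel/sched/core.c.

/*
 * Illustrative sketch only -- not part of this commit. Replay a
 * previously saved affinity mask for a task we already hold a
 * reference to, reusing __sched_setaffinity() so the pid lookup and
 * security_task_setscheduler() check in sched_setaffinity() are
 * skipped for this kernel-internal request.
 */
static int replay_saved_affinity(struct task_struct *p,
				 const struct cpumask *saved_mask)
{
	/* The userspace-only path keeps this check, so repeat it here. */
	if (p->flags & PF_NO_SETAFFINITY)
		return -EINVAL;

	return __sched_setaffinity(p, saved_mask);
}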
