@@ -7594,53 +7594,22 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	return retval;
 }
 
-long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
 {
-	cpumask_var_t cpus_allowed, new_mask;
-	struct task_struct *p;
 	int retval;
+	cpumask_var_t cpus_allowed, new_mask;
 
-	rcu_read_lock();
-
-	p = find_process_by_pid(pid);
-	if (!p) {
-		rcu_read_unlock();
-		return -ESRCH;
-	}
-
-	/* Prevent p going away */
-	get_task_struct(p);
-	rcu_read_unlock();
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
+		return -ENOMEM;
 
-	if (p->flags & PF_NO_SETAFFINITY) {
-		retval = -EINVAL;
-		goto out_put_task;
-	}
-	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
-		retval = -ENOMEM;
-		goto out_put_task;
-	}
 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
 		retval = -ENOMEM;
 		goto out_free_cpus_allowed;
 	}
-	retval = -EPERM;
-	if (!check_same_owner(p)) {
-		rcu_read_lock();
-		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
-			rcu_read_unlock();
-			goto out_free_new_mask;
-		}
-		rcu_read_unlock();
-	}
-
-	retval = security_task_setscheduler(p);
-	if (retval)
-		goto out_free_new_mask;
-
 
 	cpuset_cpus_allowed(p, cpus_allowed);
-	cpumask_and(new_mask, in_mask, cpus_allowed);
+	cpumask_and(new_mask, mask, cpus_allowed);
 
 	/*
 	 * Since bandwidth control happens on root_domain basis,
@@ -7661,23 +7630,63 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 #endif
 again:
 	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
+	if (retval)
+		goto out_free_new_mask;
 
-	if (!retval) {
-		cpuset_cpus_allowed(p, cpus_allowed);
-		if (!cpumask_subset(new_mask, cpus_allowed)) {
-			/*
-			 * We must have raced with a concurrent cpuset
-			 * update. Just reset the cpus_allowed to the
-			 * cpuset's cpus_allowed
-			 */
-			cpumask_copy(new_mask, cpus_allowed);
-			goto again;
-		}
+	cpuset_cpus_allowed(p, cpus_allowed);
+	if (!cpumask_subset(new_mask, cpus_allowed)) {
+		/*
+		 * We must have raced with a concurrent cpuset update.
+		 * Just reset the cpumask to the cpuset's cpus_allowed.
+		 */
+		cpumask_copy(new_mask, cpus_allowed);
+		goto again;
 	}
+
 out_free_new_mask:
 	free_cpumask_var(new_mask);
 out_free_cpus_allowed:
 	free_cpumask_var(cpus_allowed);
+	return retval;
+}
+
+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{
+	struct task_struct *p;
+	int retval;
+
+	rcu_read_lock();
+
+	p = find_process_by_pid(pid);
+	if (!p) {
+		rcu_read_unlock();
+		return -ESRCH;
+	}
+
+	/* Prevent p going away */
+	get_task_struct(p);
+	rcu_read_unlock();
+
+	if (p->flags & PF_NO_SETAFFINITY) {
+		retval = -EINVAL;
+		goto out_put_task;
+	}
+
+	if (!check_same_owner(p)) {
+		rcu_read_lock();
+		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
+			rcu_read_unlock();
+			retval = -EPERM;
+			goto out_put_task;
+		}
+		rcu_read_unlock();
+	}
+
+	retval = security_task_setscheduler(p);
+	if (retval)
+		goto out_put_task;
+
+	retval = __sched_setaffinity(p, in_mask);
 out_put_task:
 	put_task_struct(p);
 	return retval;
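
For context (not part of the commit): a minimal userspace sketch of the sched_setaffinity(2) call that this kernel path services, using the standard glibc wrapper and the CPU_SET macros. Pinning to CPU 0 is an arbitrary choice for illustration; error returns such as EPERM, EINVAL and ESRCH correspond to the checks that now live in sched_setaffinity() before it delegates to __sched_setaffinity().

/*
 * Illustrative example, not from the patch: pin the calling thread to CPU 0
 * via the glibc sched_setaffinity() wrapper.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);	/* request affinity to CPU 0 only */

	/*
	 * pid 0 means the calling thread; the kernel resolves it in
	 * find_process_by_pid() before running the checks shown above.
	 */
	if (sched_setaffinity(0, sizeof(set), &set) == -1) {
		fprintf(stderr, "sched_setaffinity: %s\n", strerror(errno));
		return 1;
	}

	printf("affinity restricted to CPU 0\n");
	return 0;
}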