@@ -2494,10 +2494,18 @@ int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
 	return 0;
 }
 
+static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
+{
+	struct cpumask *user_mask = NULL;
+
+	swap(p->user_cpus_ptr, user_mask);
+
+	return user_mask;
+}
+
 void release_user_cpus_ptr(struct task_struct *p)
 {
-	kfree(p->user_cpus_ptr);
-	p->user_cpus_ptr = NULL;
+	kfree(clear_user_cpus_ptr(p));
 }
 
 /*
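
The helper added above relies on a detach-then-free idiom: swap() moves the pointer out of the task while the caller still holds whatever lock protects it, and kfree() then runs on the detached copy (kfree(NULL) is a no-op), which is exactly how relax_compatible_cpus_allowed_ptr() later in this commit uses it under p->pi_lock. A minimal sketch of the same pattern, with a hypothetical struct and lock rather than task_struct, and not part of the patch itself:

/* Illustrative only: demo_obj, demo_clear_mask() and demo_release_mask()
 * are invented names showing the detach-then-free idiom. */
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
	raw_spinlock_t lock;		/* protects @opt_mask */
	struct cpumask *opt_mask;	/* optional, may be NULL */
};

static struct cpumask *demo_clear_mask(struct demo_obj *o)
{
	struct cpumask *mask = NULL;

	swap(o->opt_mask, mask);	/* detach while o->lock is held */
	return mask;
}

static void demo_release_mask(struct demo_obj *o)
{
	struct cpumask *mask;
	unsigned long flags;

	raw_spin_lock_irqsave(&o->lock, flags);
	mask = demo_clear_mask(o);
	raw_spin_unlock_irqrestore(&o->lock, flags);

	kfree(mask);			/* free outside the lock; kfree(NULL) is fine */
}
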
@@ -2717,27 +2725,23 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 }
 
 /*
- * Change a given task's CPU affinity. Migrate the thread to a
- * proper CPU and schedule it away if the CPU it's executing on
- * is removed from the allowed bitmask.
- *
- * NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely. The
- * call is not atomic; no spinlocks may be held.
+ * Called with both p->pi_lock and rq->lock held; drops both before returning.
  */
-static int __set_cpus_allowed_ptr(struct task_struct *p,
-                                  const struct cpumask *new_mask,
-                                  u32 flags)
+static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
+                                         const struct cpumask *new_mask,
+                                         u32 flags,
+                                         struct rq *rq,
+                                         struct rq_flags *rf)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
 {
 	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 	bool kthread = p->flags & PF_KTHREAD;
+	struct cpumask *user_mask = NULL;
 	unsigned int dest_cpu;
-	struct rq_flags rf;
-	struct rq *rq;
 	int ret = 0;
 
-	rq = task_rq_lock(p, &rf);
 	update_rq_clock(rq);
 
 	if (kthread || is_migration_disabled(p)) {
@@ -2793,20 +2797,178 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
 	__do_set_cpus_allowed(p, new_mask, flags);
 
-	return affine_move_task(rq, p, &rf, dest_cpu, flags);
+	if (flags & SCA_USER)
+		user_mask = clear_user_cpus_ptr(p);
+
+	ret = affine_move_task(rq, p, rf, dest_cpu, flags);
+
+	kfree(user_mask);
+
+	return ret;
 
 out:
-	task_rq_unlock(rq, p, &rf);
+	task_rq_unlock(rq, p, rf);
 
 	return ret;
 }
 
+/*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask.
+ *
+ * NOTE: the caller must have a valid reference to the task, the
+ * task must not exit() & deallocate itself prematurely. The
+ * call is not atomic; no spinlocks may be held.
+ */
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+                                  const struct cpumask *new_mask, u32 flags)
+{
+	struct rq_flags rf;
+	struct rq *rq;
+
+	rq = task_rq_lock(p, &rf);
+	return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf);
+}
+
 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 {
 	return __set_cpus_allowed_ptr(p, new_mask, 0);
 }
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
+/*
+ * Change a given task's CPU affinity to the intersection of its current
+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask
+ * and pointing @p->user_cpus_ptr to a copy of the old mask.
+ * If the resulting mask is empty, leave the affinity unchanged and return
+ * -EINVAL.
+ */
+static int restrict_cpus_allowed_ptr(struct task_struct *p,
+                                     struct cpumask *new_mask,
+                                     const struct cpumask *subset_mask)
+{
+	struct cpumask *user_mask = NULL;
+	struct rq_flags rf;
+	struct rq *rq;
+	int err;
+
+	if (!p->user_cpus_ptr) {
+		user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
+		if (!user_mask)
+			return -ENOMEM;
+	}
+
+	rq = task_rq_lock(p, &rf);
+
+	/*
+	 * Forcefully restricting the affinity of a deadline task is
+	 * likely to cause problems, so fail and noisily override the
+	 * mask entirely.
+	 */
+	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
+		err = -EPERM;
+		goto err_unlock;
+	}
+
+	if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
+		err = -EINVAL;
+		goto err_unlock;
+	}
+
+	/*
+	 * We're about to butcher the task affinity, so keep track of what
+	 * the user asked for in case we're able to restore it later on.
+	 */
+	if (user_mask) {
+		cpumask_copy(user_mask, p->cpus_ptr);
+		p->user_cpus_ptr = user_mask;
+	}
+
+	return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);
+
+err_unlock:
+	task_rq_unlock(rq, p, &rf);
+	kfree(user_mask);
+	return err;
+}
+
+/*
+ * Restrict the CPU affinity of task @p so that it is a subset of
+ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
+ * old affinity mask. If the resulting mask is empty, we warn and walk
+ * up the cpuset hierarchy until we find a suitable mask.
+ */
+void force_compatible_cpus_allowed_ptr(struct task_struct *p)
+{
+	cpumask_var_t new_mask;
+	const struct cpumask *override_mask = task_cpu_possible_mask(p);
+
+	alloc_cpumask_var(&new_mask, GFP_KERNEL);
+
+	/*
+	 * __migrate_task() can fail silently in the face of concurrent
+	 * offlining of the chosen destination CPU, so take the hotplug
+	 * lock to ensure that the migration succeeds.
+	 */
+	cpus_read_lock();
+	if (!cpumask_available(new_mask))
+		goto out_set_mask;
+
+	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
+		goto out_free_mask;
+
+	/*
+	 * We failed to find a valid subset of the affinity mask for the
+	 * task, so override it based on its cpuset hierarchy.
+	 */
+	cpuset_cpus_allowed(p, new_mask);
+	override_mask = new_mask;
+
+out_set_mask:
+	if (printk_ratelimit()) {
+		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
+				task_pid_nr(p), p->comm,
+				cpumask_pr_args(override_mask));
+	}
+
+	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
+out_free_mask:
+	cpus_read_unlock();
+	free_cpumask_var(new_mask);
+}
+
+static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
+
+/*
+ * Restore the affinity of a task @p which was previously restricted by a
+ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
+ * @p->user_cpus_ptr.
+ *
+ * It is the caller's responsibility to serialise this with any calls to
+ * force_compatible_cpus_allowed_ptr(@p).
+ */
+void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
+{
+	struct cpumask *user_mask = p->user_cpus_ptr;
+	unsigned long flags;
+
+	/*
+	 * Try to restore the old affinity mask. If this fails, then
+	 * we free the mask explicitly to avoid it being inherited across
+	 * a subsequent fork().
+	 */
+	if (!user_mask || !__sched_setaffinity(p, user_mask))
+		return;
+
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	user_mask = clear_user_cpus_ptr(p);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+	kfree(user_mask);
+}
+
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
 #ifdef CONFIG_SCHED_DEBUG
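
The comments above describe how force_compatible_cpus_allowed_ptr() and relax_compatible_cpus_allowed_ptr() are meant to be paired, but this hunk adds no caller. The sketch below shows one plausible shape for the arch-side glue at exec time; the use of arch_setup_new_exec() here and the detection helper task_needs_restricted_cpus() are assumptions for illustration, not part of this change:

/* Illustrative arch glue only (not in this patch). The point is the
 * pairing: force_*() narrows the affinity and stashes the user-requested
 * mask, relax_*() restores it once the restriction no longer applies.
 * task_needs_restricted_cpus() is an assumed helper. */
#include <linux/sched.h>

void arch_setup_new_exec(void)
{
	if (task_needs_restricted_cpus(current))
		force_compatible_cpus_allowed_ptr(current);
	else
		relax_compatible_cpus_allowed_ptr(current);
}
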
@@ -7629,7 +7791,7 @@ __sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
 	}
 #endif
 again:
-	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
+	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
 	if (retval)
 		goto out_free_new_mask;
 
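
With the hunk above, an affinity change that originates from userspace (__sched_setaffinity() backs the sched_setaffinity(2) path) now reaches __set_cpus_allowed_ptr_locked() with SCA_USER set, so any mask previously saved by force_compatible_cpus_allowed_ptr() is freed rather than restored later: the explicit request supersedes it. From userspace this is just the usual call; a small illustrative program, not part of the patch:

/* Userspace view (illustrative): an explicit sched_setaffinity() call is a
 * fresh user request, so a previously saved "compatible" mask is dropped. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);	/* request CPU0 only */

	if (sched_setaffinity(0, sizeof(set), &set))	/* 0 == calling thread */
		perror("sched_setaffinity");

	return 0;
}
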