@@ -782,6 +782,7 @@ struct vm_area_struct {
782782struct mm_cid {
783783 u64 time ;
784784 int cid ;
785+ int recent_cid ;
785786};
786787#endif
787788
@@ -852,6 +853,27 @@ struct mm_struct {
852853 * When the next mm_cid scan is due (in jiffies).
853854 */
854855 unsigned long mm_cid_next_scan ;
856+ /**
857+ * @nr_cpus_allowed: Number of CPUs allowed for mm.
858+ *
859+ * Number of CPUs allowed in the union of all mm's
860+ * threads allowed CPUs.
861+ */
862+ unsigned int nr_cpus_allowed ;
863+ /**
864+ * @max_nr_cid: Maximum number of concurrency IDs allocated.
865+ *
866+ * Track the highest number of concurrency IDs allocated for the
867+ * mm.
868+ */
869+ atomic_t max_nr_cid ;
870+ /**
871+ * @cpus_allowed_lock: Lock protecting mm cpus_allowed.
872+ *
873+ * Provide mutual exclusion for mm cpus_allowed and
874+ * mm nr_cpus_allowed updates.
875+ */
876+ raw_spinlock_t cpus_allowed_lock ;
855877#endif
856878#ifdef CONFIG_MMU
857879 atomic_long_t pgtables_bytes ; /* size of all page tables */
@@ -1170,36 +1192,53 @@ static inline int mm_cid_clear_lazy_put(int cid)
11701192 return cid & ~MM_CID_LAZY_PUT ;
11711193}
11721194
1195+ /*
1196+ * mm_cpus_allowed: Union of all mm's threads allowed CPUs.
1197+ */
1198+ static inline cpumask_t * mm_cpus_allowed (struct mm_struct * mm )
1199+ {
1200+ unsigned long bitmap = (unsigned long )mm ;
1201+
1202+ bitmap += offsetof(struct mm_struct , cpu_bitmap );
1203+ /* Skip cpu_bitmap */
1204+ bitmap += cpumask_size ();
1205+ return (struct cpumask * )bitmap ;
1206+ }
1207+
11731208/* Accessor for struct mm_struct's cidmask. */
11741209static inline cpumask_t * mm_cidmask (struct mm_struct * mm )
11751210{
1176- unsigned long cid_bitmap = (unsigned long )mm ;
1211+ unsigned long cid_bitmap = (unsigned long )mm_cpus_allowed ( mm ) ;
11771212
1178- cid_bitmap += offsetof(struct mm_struct , cpu_bitmap );
1179- /* Skip cpu_bitmap */
1213+ /* Skip mm_cpus_allowed */
11801214 cid_bitmap += cpumask_size ();
11811215 return (struct cpumask * )cid_bitmap ;
11821216}
11831217
1184- static inline void mm_init_cid (struct mm_struct * mm )
1218+ static inline void mm_init_cid (struct mm_struct * mm , struct task_struct * p )
11851219{
11861220 int i ;
11871221
11881222 for_each_possible_cpu (i ) {
11891223 struct mm_cid * pcpu_cid = per_cpu_ptr (mm -> pcpu_cid , i );
11901224
11911225 pcpu_cid -> cid = MM_CID_UNSET ;
1226+ pcpu_cid -> recent_cid = MM_CID_UNSET ;
11921227 pcpu_cid -> time = 0 ;
11931228 }
1229+ mm -> nr_cpus_allowed = p -> nr_cpus_allowed ;
1230+ atomic_set (& mm -> max_nr_cid , 0 );
1231+ raw_spin_lock_init (& mm -> cpus_allowed_lock );
1232+ cpumask_copy (mm_cpus_allowed (mm ), & p -> cpus_mask );
11941233 cpumask_clear (mm_cidmask (mm ));
11951234}
11961235
1197- static inline int mm_alloc_cid_noprof (struct mm_struct * mm )
1236+ static inline int mm_alloc_cid_noprof (struct mm_struct * mm , struct task_struct * p )
11981237{
11991238 mm -> pcpu_cid = alloc_percpu_noprof (struct mm_cid );
12001239 if (!mm -> pcpu_cid )
12011240 return - ENOMEM ;
1202- mm_init_cid (mm );
1241+ mm_init_cid (mm , p );
12031242 return 0 ;
12041243}
12051244#define mm_alloc_cid (...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
@@ -1212,16 +1251,31 @@ static inline void mm_destroy_cid(struct mm_struct *mm)
12121251
/* Extra bytes appended to struct mm_struct for concurrency-ID bookkeeping. */
static inline unsigned int mm_cid_size(void)
{
	/* One cpumask for mm_cpus_allowed(), one for mm_cidmask(). */
	return 2 * cpumask_size();
}
1256+
1257+ static inline void mm_set_cpus_allowed (struct mm_struct * mm , const struct cpumask * cpumask )
1258+ {
1259+ struct cpumask * mm_allowed = mm_cpus_allowed (mm );
1260+
1261+ if (!mm )
1262+ return ;
1263+ /* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */
1264+ raw_spin_lock (& mm -> cpus_allowed_lock );
1265+ cpumask_or (mm_allowed , mm_allowed , cpumask );
1266+ WRITE_ONCE (mm -> nr_cpus_allowed , cpumask_weight (mm_allowed ));
1267+ raw_spin_unlock (& mm -> cpus_allowed_lock );
12161268}
12171269#else /* CONFIG_SCHED_MM_CID */
/* Stubs: concurrency IDs are compiled out, every operation is a no-op. */
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; }
static inline void mm_destroy_cid(struct mm_struct *mm) { }
/* No extra storage is appended to struct mm_struct in this configuration. */
static inline unsigned int mm_cid_size(void)
{
	return 0;
}
static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) { }
12251279#endif /* CONFIG_SCHED_MM_CID */
12261280
12271281struct mmu_gather ;
0 commit comments