@@ -285,7 +285,7 @@ static struct cpuset top_cpuset = {
  */
 
 static DEFINE_MUTEX(cpuset_mutex);
-static DEFINE_SPINLOCK(callback_lock);
+static DEFINE_RAW_SPINLOCK(callback_lock);
 
 static struct workqueue_struct *cpuset_migrate_mm_wq;
 
@@ -908,9 +908,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
 			continue;
 		rcu_read_unlock();
 
-		spin_lock_irq(&callback_lock);
+		raw_spin_lock_irq(&callback_lock);
 		cpumask_copy(cp->effective_cpus, new_cpus);
-		spin_unlock_irq(&callback_lock);
+		raw_spin_unlock_irq(&callback_lock);
 
 		WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -975,9 +975,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	if (retval < 0)
 		return retval;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	/* use trialcs->cpus_allowed as a temp variable */
 	update_cpumasks_hier(cs, trialcs->cpus_allowed);
@@ -1177,9 +1177,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
 			continue;
 		rcu_read_unlock();
 
-		spin_lock_irq(&callback_lock);
+		raw_spin_lock_irq(&callback_lock);
 		cp->effective_mems = *new_mems;
-		spin_unlock_irq(&callback_lock);
+		raw_spin_unlock_irq(&callback_lock);
 
 		WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1247,9 +1247,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 	if (retval < 0)
 		goto done;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cs->mems_allowed = trialcs->mems_allowed;
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	/* use trialcs->mems_allowed as a temp variable */
 	update_nodemasks_hier(cs, &trialcs->mems_allowed);
@@ -1340,9 +1340,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cs->flags = trialcs->flags;
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
 		rebuild_sched_domains_locked();
@@ -1757,7 +1757,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
 	cpuset_filetype_t type = seq_cft(sf)->private;
 	int ret = 0;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 
 	switch (type) {
 	case FILE_CPULIST:
@@ -1776,7 +1776,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
 		ret = -EINVAL;
 	}
 
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 	return ret;
 }
 
@@ -1991,12 +1991,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 
 	cpuset_inc();
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
 		cs->effective_mems = parent->effective_mems;
 	}
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
 		goto out_unlock;
@@ -2023,12 +2023,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 	}
 	rcu_read_unlock();
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cs->mems_allowed = parent->mems_allowed;
 	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
 	return 0;
@@ -2067,7 +2067,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
 static void cpuset_bind(struct cgroup_subsys_state *root_css)
 {
 	mutex_lock(&cpuset_mutex);
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 
 	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2078,7 +2078,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
 		top_cpuset.mems_allowed = top_cpuset.effective_mems;
 	}
 
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 	mutex_unlock(&cpuset_mutex);
 }
 
@@ -2179,12 +2179,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
 {
 	bool is_empty;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, new_cpus);
 	cpumask_copy(cs->effective_cpus, new_cpus);
 	cs->mems_allowed = *new_mems;
 	cs->effective_mems = *new_mems;
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	/*
 	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -2221,10 +2221,10 @@ hotplug_update_tasks(struct cpuset *cs,
 	if (nodes_empty(*new_mems))
 		*new_mems = parent_cs(cs)->effective_mems;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->effective_cpus, new_cpus);
 	cs->effective_mems = *new_mems;
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	if (cpus_updated)
 		update_tasks_cpumask(cs);
@@ -2317,21 +2317,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 
 	/* synchronize cpus_allowed to cpu_active_mask */
 	if (cpus_updated) {
-		spin_lock_irq(&callback_lock);
+		raw_spin_lock_irq(&callback_lock);
 		if (!on_dfl)
 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
-		spin_unlock_irq(&callback_lock);
+		raw_spin_unlock_irq(&callback_lock);
 		/* we don't mess with cpumasks of tasks in top_cpuset */
 	}
 
 	/* synchronize mems_allowed to N_MEMORY */
 	if (mems_updated) {
-		spin_lock_irq(&callback_lock);
+		raw_spin_lock_irq(&callback_lock);
 		if (!on_dfl)
 			top_cpuset.mems_allowed = new_mems;
 		top_cpuset.effective_mems = new_mems;
-		spin_unlock_irq(&callback_lock);
+		raw_spin_unlock_irq(&callback_lock);
 		update_tasks_nodemask(&top_cpuset);
 	}
 
@@ -2436,11 +2436,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&callback_lock, flags);
+	raw_spin_lock_irqsave(&callback_lock, flags);
 	rcu_read_lock();
 	guarantee_online_cpus(task_cs(tsk), pmask);
 	rcu_read_unlock();
-	spin_unlock_irqrestore(&callback_lock, flags);
+	raw_spin_unlock_irqrestore(&callback_lock, flags);
 }
 
 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
@@ -2488,11 +2488,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 	nodemask_t mask;
 	unsigned long flags;
 
-	spin_lock_irqsave(&callback_lock, flags);
+	raw_spin_lock_irqsave(&callback_lock, flags);
 	rcu_read_lock();
 	guarantee_online_mems(task_cs(tsk), &mask);
 	rcu_read_unlock();
-	spin_unlock_irqrestore(&callback_lock, flags);
+	raw_spin_unlock_irqrestore(&callback_lock, flags);
 
 	return mask;
 }
@@ -2584,14 +2584,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
 		return true;
 
 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
-	spin_lock_irqsave(&callback_lock, flags);
+	raw_spin_lock_irqsave(&callback_lock, flags);
 
 	rcu_read_lock();
 	cs = nearest_hardwall_ancestor(task_cs(current));
 	allowed = node_isset(node, cs->mems_allowed);
 	rcu_read_unlock();
 
-	spin_unlock_irqrestore(&callback_lock, flags);
+	raw_spin_unlock_irqrestore(&callback_lock, flags);
 	return allowed;
 }
 
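On PREEMPT_RT kernels a spinlock_t is mapped to a sleeping lock, so it must not be taken from contexts that cannot sleep, while a raw_spinlock_t keeps the classic behaviour of spinning with preemption (and, for the _irq/_irqsave variants, interrupts) disabled. callback_lock is acquired with interrupts disabled on allocation paths such as __cpuset_node_allowed(), which is the usual motivation for this kind of conversion. The change itself is mechanical: the definition switches to DEFINE_RAW_SPINLOCK() and every lock/unlock call moves to the matching raw_spin_* variant. A minimal sketch of that pattern, using an illustrative lock and counter rather than anything from cpuset.c:

#include <linux/spinlock.h>

/* Before: static DEFINE_SPINLOCK(example_lock); -- becomes a sleeping lock on PREEMPT_RT */
/* After: a raw spinlock stays atomic on every preemption model */
static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned long example_events;

/* Safe to call from atomic context, including with interrupts disabled */
static void example_record_event(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	example_events++;	/* keep the critical section short and bounded */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

The trade-off is that raw spinlock critical sections contribute directly to interrupt and scheduling latency even on RT, which is tolerable here because callback_lock only guards short cpumask/nodemask copies and flag updates.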