@@ -232,6 +232,13 @@ static cpumask_var_t isolated_cpus;
 /* List of remote partition root children */
 static struct list_head remote_children;
 
+/*
+ * A flag to force sched domain rebuild at the end of an operation while
+ * inhibiting it in the intermediate stages when set. Currently it is only
+ * set in hotplug code.
+ */
+static bool force_sd_rebuild;
+
 /*
  * Partition root states:
  *
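The pattern the rest of this patch applies is: while force_sd_rebuild is set, every intermediate code path skips its immediate sched domain rebuild, and the operation that set the flag performs a single rebuild at the end. A minimal, self-contained sketch of that pattern follows; it is plain userspace C with a stubbed-out rebuild function and hypothetical helper names, not the kernel's actual code:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's rebuild_sched_domains_locked(). */
static void rebuild_sched_domains(void)
{
	puts("rebuilding sched domains");
}

static bool force_sd_rebuild;

/* An intermediate step that would normally rebuild immediately. */
static void intermediate_step(bool need_rebuild)
{
	if (need_rebuild && !force_sd_rebuild)
		rebuild_sched_domains();
}

int main(void)
{
	force_sd_rebuild = true;	/* inhibit intermediate rebuilds */
	intermediate_step(true);	/* skipped */
	intermediate_step(true);	/* skipped */
	if (force_sd_rebuild) {		/* one rebuild at the end */
		force_sd_rebuild = false;
		rebuild_sched_domains();
	}
	return 0;			/* prints the message exactly once */
}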
@@ -1475,7 +1482,7 @@ static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
 			clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 	}
 
-	if (rebuild_domains)
+	if (rebuild_domains && !force_sd_rebuild)
 		rebuild_sched_domains_locked();
 }
 
@@ -1833,7 +1840,7 @@ static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask,
 			remote_partition_disable(child, tmp);
 			disable_cnt++;
 		}
-	if (disable_cnt)
+	if (disable_cnt && !force_sd_rebuild)
 		rebuild_sched_domains_locked();
 }
 
@@ -1991,6 +1998,8 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 			part_error = PERR_CPUSEMPTY;
 			goto write_error;
 		}
+		/* Check newmask again, whether cpus are available for parent/cs */
+		nocpu |= tasks_nocpu_error(parent, cs, newmask);
 
 		/*
 		 * partcmd_update with newmask:
@@ -2440,7 +2449,8 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 	}
 	rcu_read_unlock();
 
-	if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD))
+	if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD) &&
+	    !force_sd_rebuild)
 		rebuild_sched_domains_locked();
 }
 
@@ -2523,7 +2533,8 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	 */
 	if (!*buf) {
 		cpumask_clear(trialcs->cpus_allowed);
-		cpumask_clear(trialcs->effective_xcpus);
+		if (cpumask_empty(trialcs->exclusive_cpus))
+			cpumask_clear(trialcs->effective_xcpus);
 	} else {
 		retval = cpulist_parse(buf, trialcs->cpus_allowed);
 		if (retval < 0)
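One behavioral note on the update_cpumask() hunk above: writing an empty cpuset.cpus used to clear effective_xcpus unconditionally; it is now preserved whenever exclusive_cpus is non-empty, presumably because effective_xcpus tracks the user-set exclusive_cpus when that mask is populated. A toy illustration of the new behavior, using plain integer bitmasks in place of the kernel's cpumask API (all names here are illustrative):

#include <stdio.h>

/* Toy stand-ins for cpumasks: one bit per CPU. */
struct trial {
	unsigned cpus_allowed;
	unsigned exclusive_cpus;
	unsigned effective_xcpus;
};

/* Mirror of the patched logic for an empty "cpuset.cpus" write. */
static void clear_cpus(struct trial *t)
{
	t->cpus_allowed = 0;
	if (t->exclusive_cpus == 0)	/* only clear if not user-set */
		t->effective_xcpus = 0;
}

int main(void)
{
	struct trial t = { .cpus_allowed = 0xf, .exclusive_cpus = 0xc,
			   .effective_xcpus = 0xc };
	clear_cpus(&t);
	printf("effective_xcpus = 0x%x\n", t.effective_xcpus); /* 0xc kept */
	return 0;
}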
@@ -3101,7 +3112,8 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	cs->flags = trialcs->flags;
 	spin_unlock_irq(&callback_lock);
 
-	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
+	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed &&
+	    !force_sd_rebuild)
 		rebuild_sched_domains_locked();
 
 	if (spread_flag_changed)
@@ -4498,11 +4510,9 @@ hotplug_update_tasks(struct cpuset *cs,
 		update_tasks_nodemask(cs);
 }
 
-static bool force_rebuild;
-
 void cpuset_force_rebuild(void)
 {
-	force_rebuild = true;
+	force_sd_rebuild = true;
 }
 
 /**
@@ -4650,15 +4660,9 @@ static void cpuset_handle_hotplug(void)
 		       !cpumask_empty(subpartitions_cpus);
 	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
 
-	/*
-	 * In the rare case that hotplug removes all the cpus in
-	 * subpartitions_cpus, we assumed that cpus are updated.
-	 */
-	if (!cpus_updated && !cpumask_empty(subpartitions_cpus))
-		cpus_updated = true;
-
 	/* For v1, synchronize cpus_allowed to cpu_active_mask */
 	if (cpus_updated) {
+		cpuset_force_rebuild();
 		spin_lock_irq(&callback_lock);
 		if (!on_dfl)
 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
@@ -4714,8 +4718,8 @@ static void cpuset_handle_hotplug(void)
 	}
 
 	/* rebuild sched domains if cpus_allowed has changed */
-	if (cpus_updated || force_rebuild) {
-		force_rebuild = false;
+	if (force_sd_rebuild) {
+		force_sd_rebuild = false;
 		rebuild_sched_domains_cpuslocked();
 	}
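Taken together, the hotplug hunks replace the old cpus_updated || force_rebuild check with a single flag: cpuset_handle_hotplug() now calls cpuset_force_rebuild() up front whenever CPUs changed, every intermediate update skips its own rebuild while the flag is set, and the tail of the function rebuilds exactly once. A stubbed, runnable outline of that flow (the function bodies here are placeholders, not the kernel implementation):

#include <stdbool.h>
#include <stdio.h>

static bool force_sd_rebuild;

/* Mirrors the patched cpuset_force_rebuild(). */
static void cpuset_force_rebuild(void)
{
	force_sd_rebuild = true;
}

/* Skeleton of the patched hotplug flow, with the work stubbed out. */
static void handle_hotplug(bool cpus_updated)
{
	if (cpus_updated)
		cpuset_force_rebuild();	/* request one rebuild up front */

	/* ... intermediate updates all skip rebuilding while the flag is set ... */

	if (force_sd_rebuild) {		/* single rebuild at the end */
		force_sd_rebuild = false;
		puts("rebuild_sched_domains_cpuslocked()");
	}
}

int main(void)
{
	handle_hotplug(true);	/* prints the rebuild line exactly once */
	return 0;
}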