@@ -232,6 +232,13 @@ static cpumask_var_t isolated_cpus;
 /* List of remote partition root children */
 static struct list_head remote_children;
 
+/*
+ * A flag to force a sched domain rebuild at the end of an operation while
+ * inhibiting rebuilds in the intermediate stages. Currently it is only set
+ * in the hotplug code.
+ */
+static bool force_sd_rebuild;
+
 /*
  * Partition root states:
  *
@@ -1475,7 +1482,7 @@ static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
 			clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 	}
 
-	if (rebuild_domains)
+	if (rebuild_domains && !force_sd_rebuild)
 		rebuild_sched_domains_locked();
 }
 
@@ -1833,7 +1840,7 @@ static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask,
 			remote_partition_disable(child, tmp);
 			disable_cnt++;
 		}
-	if (disable_cnt)
+	if (disable_cnt && !force_sd_rebuild)
 		rebuild_sched_domains_locked();
 }
 
@@ -1991,6 +1998,8 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 			part_error = PERR_CPUSEMPTY;
 			goto write_error;
 		}
+		/* Check newmask again to see whether cpus are available for parent/cs */
+		nocpu |= tasks_nocpu_error(parent, cs, newmask);
 
 		/*
 		 * partcmd_update with newmask:
@@ -2440,7 +2449,8 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 	}
 	rcu_read_unlock();
 
-	if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD))
+	if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD) &&
+	    !force_sd_rebuild)
 		rebuild_sched_domains_locked();
 }
 
@@ -2523,7 +2533,8 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	 */
 	if (!*buf) {
 		cpumask_clear(trialcs->cpus_allowed);
-		cpumask_clear(trialcs->effective_xcpus);
+		if (cpumask_empty(trialcs->exclusive_cpus))
+			cpumask_clear(trialcs->effective_xcpus);
 	} else {
 		retval = cpulist_parse(buf, trialcs->cpus_allowed);
 		if (retval < 0)
@@ -3101,7 +3112,8 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	cs->flags = trialcs->flags;
 	spin_unlock_irq(&callback_lock);
 
-	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
+	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed &&
+	    !force_sd_rebuild)
 		rebuild_sched_domains_locked();
 
 	if (spread_flag_changed)
@@ -4498,11 +4510,9 @@ hotplug_update_tasks(struct cpuset *cs,
 		update_tasks_nodemask(cs);
 }
 
-static bool force_rebuild;
-
 void cpuset_force_rebuild(void)
 {
-	force_rebuild = true;
+	force_sd_rebuild = true;
 }
 
 /**
@@ -4650,15 +4660,9 @@ static void cpuset_handle_hotplug(void)
 		       !cpumask_empty(subpartitions_cpus);
 	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
 
-	/*
-	 * In the rare case that hotplug removes all the cpus in
-	 * subpartitions_cpus, we assumed that cpus are updated.
-	 */
-	if (!cpus_updated && !cpumask_empty(subpartitions_cpus))
-		cpus_updated = true;
-
 	/* For v1, synchronize cpus_allowed to cpu_active_mask */
 	if (cpus_updated) {
+		cpuset_force_rebuild();
 		spin_lock_irq(&callback_lock);
 		if (!on_dfl)
 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
@@ -4714,8 +4718,8 @@ static void cpuset_handle_hotplug(void)
 	}
 
 	/* rebuild sched domains if cpus_allowed has changed */
-	if (cpus_updated || force_rebuild) {
-		force_rebuild = false;
+	if (force_sd_rebuild) {
+		force_sd_rebuild = false;
 		rebuild_sched_domains_cpuslocked();
 	}
 
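
The point of force_sd_rebuild is to coalesce the several rebuild_sched_domains_locked() calls that intermediate stages of a hotplug operation could otherwise trigger into exactly one rebuild at the end. A minimal userspace sketch of the same defer-and-coalesce idea, assuming nothing beyond the patch itself; all helper names (do_rebuild, intermediate_update, hotplug_update) are invented for illustration:

/*
 * Sketch only, not kernel code: one global flag suppresses rebuilds in
 * intermediate stages; the final stage clears the flag and rebuilds once.
 */
#include <stdbool.h>
#include <stdio.h>

static bool force_sd_rebuild;	/* set once at the start of hotplug */
static int rebuild_count;	/* how many rebuilds actually ran */

static void do_rebuild(void)
{
	printf("rebuild #%d\n", ++rebuild_count);
}

/* An intermediate step: rebuilds only when no final rebuild is pending. */
static void intermediate_update(bool domains_changed)
{
	if (domains_changed && !force_sd_rebuild)
		do_rebuild();
}

/* The hotplug path: however many steps ran, exactly one rebuild at the end. */
static void hotplug_update(void)
{
	force_sd_rebuild = true;

	intermediate_update(true);	/* suppressed */
	intermediate_update(true);	/* suppressed */

	if (force_sd_rebuild) {
		force_sd_rebuild = false;
		do_rebuild();		/* prints "rebuild #1" exactly once */
	}
}

int main(void)
{
	hotplug_update();
	return 0;
}

Outside the hotplug path the flag stays false, so the intermediate call sites rebuild immediately, exactly as before the patch.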