@@ -130,10 +130,9 @@ static cpumask_var_t subpartitions_cpus; /* RWCS */
130130static cpumask_var_t isolated_cpus ; /* CSCB */
131131
132132/*
133- * Set if isolated_cpus is being updated in the current cpuset_mutex
134- * critical section.
133+ * Set if housekeeping cpumasks are to be updated.
135134 */
136- static bool isolated_cpus_updating ; /* RWCS */
135+ static bool update_housekeeping ; /* RWCS */
137136
138137/*
139138 * A flag to force sched domain rebuild at the end of an operation.
@@ -1189,7 +1188,7 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus
11891188 return ;
11901189 cpumask_andnot (isolated_cpus , isolated_cpus , xcpus );
11911190 }
1192- isolated_cpus_updating = true;
1191+ update_housekeeping = true;
11931192}
11941193
11951194/*
@@ -1307,22 +1306,22 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
13071306}
13081307
13091308/*
1310- * update_isolation_cpumasks - Update external isolation related CPU masks
1309+ * update_hk_sched_domains - Update HK cpumasks & rebuild sched domains
13111310 *
1312- * The following external CPU masks will be updated if necessary:
1313- * - workqueue unbound cpumask
1311+ * Update housekeeping cpumasks and rebuild sched domains if necessary.
1312+ * This should be called at the end of cpuset or hotplug actions.
13141313 */
1315- static void update_isolation_cpumasks (void )
1314+ static void update_hk_sched_domains (void )
13161315{
1317- int ret ;
1318-
1319- if (! isolated_cpus_updating )
1320- return ;
1321-
1322- ret = housekeeping_update ( isolated_cpus );
1323- WARN_ON_ONCE ( ret < 0 );
1324-
1325- isolated_cpus_updating = false ;
1316+ if ( update_housekeeping ) {
1317+ /* Updating HK cpumasks implies rebuilding sched domains */
1318+ WARN_ON_ONCE ( housekeeping_update ( isolated_cpus ));
1319+ update_housekeeping = false ;
1320+ force_sd_rebuild = true;
1321+ }
1322+ /* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */
1323+ if ( force_sd_rebuild )
1324+ rebuild_sched_domains_locked () ;
13261325}
13271326
13281327/**
@@ -1473,7 +1472,6 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
14731472 cs -> remote_partition = true;
14741473 cpumask_copy (cs -> effective_xcpus , tmp -> new_cpus );
14751474 spin_unlock_irq (& callback_lock );
1476- update_isolation_cpumasks ();
14771475 cpuset_force_rebuild ();
14781476 cs -> prs_err = 0 ;
14791477
@@ -1518,7 +1516,6 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
15181516 compute_excpus (cs , cs -> effective_xcpus );
15191517 reset_partition_data (cs );
15201518 spin_unlock_irq (& callback_lock );
1521- update_isolation_cpumasks ();
15221519 cpuset_force_rebuild ();
15231520
15241521 /*
@@ -1589,7 +1586,6 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
15891586 if (xcpus )
15901587 cpumask_copy (cs -> exclusive_cpus , xcpus );
15911588 spin_unlock_irq (& callback_lock );
1592- update_isolation_cpumasks ();
15931589 if (adding || deleting )
15941590 cpuset_force_rebuild ();
15951591
@@ -1933,7 +1929,6 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
19331929 partition_xcpus_add (new_prs , parent , tmp -> delmask );
19341930
19351931 spin_unlock_irq (& callback_lock );
1936- update_isolation_cpumasks ();
19371932
19381933 if ((old_prs != new_prs ) && (cmd == partcmd_update ))
19391934 update_partition_exclusive_flag (cs , new_prs );
@@ -2901,7 +2896,6 @@ static int update_prstate(struct cpuset *cs, int new_prs)
29012896 else if (isolcpus_updated )
29022897 isolated_cpus_update (old_prs , new_prs , cs -> effective_xcpus );
29032898 spin_unlock_irq (& callback_lock );
2904- update_isolation_cpumasks ();
29052899
29062900 /* Force update if switching back to member & update effective_xcpus */
29072901 update_cpumasks_hier (cs , & tmpmask , !new_prs );
@@ -3191,9 +3185,8 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
31913185 }
31923186
31933187 free_cpuset (trialcs );
3194- if (force_sd_rebuild )
3195- rebuild_sched_domains_locked ();
31963188out_unlock :
3189+ update_hk_sched_domains ();
31973190 cpuset_full_unlock ();
31983191 if (of_cft (of )-> private == FILE_MEMLIST )
31993192 schedule_flush_migrate_mm ();
@@ -3301,6 +3294,7 @@ static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
33013294 cpuset_full_lock ();
33023295 if (is_cpuset_online (cs ))
33033296 retval = update_prstate (cs , val );
3297+ update_hk_sched_domains ();
33043298 cpuset_full_unlock ();
33053299 return retval ?: nbytes ;
33063300}
@@ -3475,6 +3469,7 @@ static void cpuset_css_killed(struct cgroup_subsys_state *css)
34753469 /* Reset valid partition back to member */
34763470 if (is_partition_valid (cs ))
34773471 update_prstate (cs , PRS_MEMBER );
3472+ update_hk_sched_domains ();
34783473 cpuset_full_unlock ();
34793474}
34803475
@@ -3882,10 +3877,12 @@ static void cpuset_handle_hotplug(void)
38823877 rcu_read_unlock ();
38833878 }
38843879
3885- /* rebuild sched domains if necessary */
3886- if (force_sd_rebuild )
3887- rebuild_sched_domains_cpuslocked ();
38883880
3881+ if (update_housekeeping || force_sd_rebuild ) {
3882+ mutex_lock (& cpuset_mutex );
3883+ update_hk_sched_domains ();
3884+ mutex_unlock (& cpuset_mutex );
3885+ }
38893886 free_tmpmasks (ptmp );
38903887}
38913888