Commit 7a2127e6 authored by Will Deacon, committed by Tejun Heo

cpuset: Call set_cpus_allowed_ptr() with appropriate mask for task

set_cpus_allowed_ptr() will fail with -EINVAL if the requested
affinity mask is not a subset of the task_cpu_possible_mask() for the
task being updated. Consequently, on a heterogeneous system with cpusets
spanning the different CPU types, updates to the cgroup hierarchy can
silently fail to update task affinities when the effective affinity
mask for the cpuset is expanded.
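
To make the failure mode concrete, here is a minimal standalone sketch of
the subset check, written in plain C with integer bitmasks. This is not
kernel code: the helper name and mask values are invented stand-ins for
the validation set_cpus_allowed_ptr() performs against
task_cpu_possible_mask().

/* sketch.c - model of the affinity subset check, NOT kernel code */
#include <errno.h>
#include <stdio.h>

/* Invented stand-in for the scheduler's validation: the requested
 * mask must be a subset of the CPUs the task can physically run on. */
static int set_cpus_allowed_sketch(unsigned int requested,
				   unsigned int task_possible)
{
	if (requested & ~task_possible)	/* not a subset */
		return -EINVAL;
	return 0;			/* affinity change would proceed */
}

int main(void)
{
	/* A task limited to CPUs 2-3, asked to run on CPUs 0-3. */
	int ret = set_cpus_allowed_sketch(0xf, 0xc);

	printf("set_cpus_allowed_sketch() returned %d\n", ret);	/* -22 */
	return 0;
}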

For example, consider an arm64 system with 4 CPUs, where CPUs 2-3 are
the only cores capable of executing 32-bit tasks. Attaching a 32-bit
task to a cpuset containing CPUs 0-2 will correctly affine the task to
CPU 2. Extending the cpuset to CPUs 0-3, however, will fail to extend
the affinity mask of the 32-bit task because update_tasks_cpumask() will
pass the full 0-3 mask to set_cpus_allowed_ptr().
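
With the fix described below, the same scenario succeeds because the
request is first intersected with the per-task possible mask. A
hypothetical bitmask walk-through of that arithmetic (again plain C,
not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int effective_cpus = 0xf;	/* cpuset now spans CPUs 0-3 */
	unsigned int task_possible  = 0xc;	/* 32-bit task: CPUs 2-3 only */

	/* Equivalent of the patch's cpumask_and(new_cpus,
	 * cs->effective_cpus, task_cpu_possible_mask(task)). */
	unsigned int new_cpus = effective_cpus & task_possible;

	/* 0xf & 0xc == 0xc: CPUs 2-3, a valid subset, so the task's
	 * affinity is extended from CPU 2 alone to CPUs 2-3. */
	printf("applied mask: 0x%x\n", new_cpus);
	return 0;
}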

Extend update_tasks_cpumask() to take a temporary 'cpumask' parameter
and use it to mask the 'effective_cpus' mask with the possible mask for
each task being updated.

Fixes: 431c69fa ("cpuset: Honour task_cpu_possible_mask() in guarantee_online_cpus()")
Signed-off-by: Will Deacon <will@kernel.org>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 3fb906e7
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1205,12 +1205,13 @@ void rebuild_sched_domains(void)
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
+ * @new_cpus: the temp variable for the new effective_cpus mask
  *
  * Iterate through each task of @cs updating its cpus_allowed to the
  * effective cpuset's. As this function is called with cpuset_rwsem held,
  * cpuset membership stays stable.
  */
-static void update_tasks_cpumask(struct cpuset *cs)
+static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
 {
 	struct css_task_iter it;
 	struct task_struct *task;
@@ -1224,7 +1225,10 @@ static void update_tasks_cpumask(struct cpuset *cs)
 		if (top_cs && (task->flags & PF_KTHREAD) &&
 		    kthread_is_per_cpu(task))
 			continue;
-		set_cpus_allowed_ptr(task, cs->effective_cpus);
+
+		cpumask_and(new_cpus, cs->effective_cpus,
+			    task_cpu_possible_mask(task));
+		set_cpus_allowed_ptr(task, new_cpus);
 	}
 	css_task_iter_end(&it);
 }
@@ -1509,7 +1513,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
 	spin_unlock_irq(&callback_lock);
 
 	if (adding || deleting)
-		update_tasks_cpumask(parent);
+		update_tasks_cpumask(parent, tmp->new_cpus);
 
 	/*
 	 * Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
@@ -1661,7 +1665,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 		WARN_ON(!is_in_v2_mode() &&
 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
 
-		update_tasks_cpumask(cp);
+		update_tasks_cpumask(cp, tmp->new_cpus);
 
 		/*
 		 * On legacy hierarchy, if the effective cpumask of any non-
@@ -2309,7 +2313,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
 		}
 	}
 
-	update_tasks_cpumask(parent);
+	update_tasks_cpumask(parent, tmpmask.new_cpus);
 
 	if (parent->child_ecpus_count)
 		update_sibling_cpumasks(parent, cs, &tmpmask);
@@ -3348,7 +3352,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
 	 * as the tasks will be migrated to an ancestor.
 	 */
 	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
-		update_tasks_cpumask(cs);
+		update_tasks_cpumask(cs, new_cpus);
 	if (mems_updated && !nodes_empty(cs->mems_allowed))
 		update_tasks_nodemask(cs);
 
@@ -3385,7 +3389,7 @@ hotplug_update_tasks(struct cpuset *cs,
 	spin_unlock_irq(&callback_lock);
 
 	if (cpus_updated)
-		update_tasks_cpumask(cs);
+		update_tasks_cpumask(cs, new_cpus);
 	if (mems_updated)
 		update_tasks_nodemask(cs);
 }