Commit da5a5522 authored by M.Baris Demiray, committed by Linus Torvalds

[PATCH] sched: make idlest_group/cpu cpus_allowed-aware

Add cpus_allowed checks to find_idlest_group() and find_idlest_cpu() so that
they return only groups that contain allowed CPUs, and only allowed CPUs,
respectively.
Signed-off-by: M.Baris Demiray <baris@labristeknoloji.com>
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent fc38ed75
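
Before the diff, a brief illustration of the idea: a scheduler group is skipped unless its CPU mask intersects the task's cpus_allowed mask, and within a group only the allowed CPUs are scanned for the lowest load. The following is a minimal standalone C sketch of that filtering logic, not the kernel code itself; the helper name find_idlest_allowed_cpu, the uint64_t bitmask stand-in for cpumask_t, and the example load values are illustrative assumptions.

/*
 * Userspace sketch (assumption, not kernel code) of the cpus_allowed
 * filtering added by this patch: skip masks that do not intersect,
 * and pick the least-loaded CPU among the allowed ones only.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Return the least-loaded CPU that is both in group_mask and in
 * allowed_mask, or -1 if the masks do not intersect (the patch jumps
 * to the nextgroup label in that case). */
static int find_idlest_allowed_cpu(uint64_t group_mask, uint64_t allowed_mask,
                                   const unsigned long *load, int nr_cpus)
{
    uint64_t tmp = group_mask & allowed_mask;   /* cpus_and() analogue */
    unsigned long min_load = ULONG_MAX;
    int idlest = -1;

    if (!tmp)                                   /* cpus_intersects() analogue */
        return -1;

    for (int i = 0; i < nr_cpus; i++) {
        if (!(tmp & (1ULL << i)))
            continue;                           /* not allowed or not in group */
        if (load[i] < min_load) {
            min_load = load[i];
            idlest = i;
        }
    }
    return idlest;
}

int main(void)
{
    unsigned long load[4] = { 10, 2, 7, 1 };

    /* Group covers CPUs 0-3, but the task may only run on CPUs 0 and 2,
     * so CPU 3 (the globally idlest) must not be chosen. */
    int cpu = find_idlest_allowed_cpu(0xFULL, 0x5ULL, load, 4);
    printf("idlest allowed cpu: %d\n", cpu);    /* prints 2 */
    return 0;
}

In the sketch, CPU 3 has the lowest load overall but is excluded because it is outside the allowed mask, which is exactly the behaviour the patch enforces in find_idlest_group()/find_idlest_cpu().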
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -966,8 +966,11 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		int local_group;
 		int i;
 
+		/* Skip over this group if it has no CPUs allowed */
+		if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+			goto nextgroup;
+
 		local_group = cpu_isset(this_cpu, group->cpumask);
-		/* XXX: put a cpus allowed check */
 
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
@@ -992,6 +995,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 			min_load = avg_load;
 			idlest = group;
 		}
+nextgroup:
 		group = group->next;
 	} while (group != sd->groups);
 
@@ -1003,13 +1007,18 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 /*
  * find_idlest_queue - find the idlest runqueue among the cpus in group.
  */
-static int find_idlest_cpu(struct sched_group *group, int this_cpu)
+static int find_idlest_cpu(struct sched_group *group,
+		struct task_struct *p, int this_cpu)
 {
+	cpumask_t tmp;
 	unsigned long load, min_load = ULONG_MAX;
 	int idlest = -1;
 	int i;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	/* Traverse only the allowed CPUs */
+	cpus_and(tmp, group->cpumask, p->cpus_allowed);
+
+	for_each_cpu_mask(i, tmp) {
 		load = source_load(i, 0);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -1052,7 +1061,7 @@ static int sched_balance_self(int cpu, int flag)
 		if (!group)
 			goto nextlevel;
 
-		new_cpu = find_idlest_cpu(group, cpu);
+		new_cpu = find_idlest_cpu(group, t, cpu);
 		if (new_cpu == -1 || new_cpu == cpu)
 			goto nextlevel;
 