Commit 16cfb1c0 authored by Nick Piggin, committed by Linus Torvalds

[PATCH] sched: reduce active load balancing

Fix up active load balancing a bit so it doesn't get called when it shouldn't.
Reset the nr_balance_failed counter at more points where we have found
conditions to be balanced.  This reduces the overly aggressive active balancing
seen on some workloads.
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 81026794
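The mechanism behind the message: in this scheduler, load_balance() bumps sd->nr_balance_failed each time it cannot migrate anything, and once the counter exceeds sd->cache_nice_tries it escalates to active balancing (waking the migration thread to push a running task off the busiest CPU). The sketch below is a hedged, self-contained userspace model of that counter logic, not the kernel source; the struct and helpers are simplified stand-ins, and it only illustrates why resetting the counter on every balanced exit damps the escalation.

/*
 * Hedged sketch (plain userspace C, not kernel/sched.c): models how a
 * per-domain nr_balance_failed counter escalates to active balancing
 * once it exceeds a cache_nice_tries threshold, and why resetting it
 * whenever the domain is found balanced keeps that escalation from
 * firing too eagerly.  Field names mirror the diff; everything else
 * is a simplified stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

struct sd_sketch {
	unsigned int nr_balance_failed;	/* consecutive failed balance attempts */
	unsigned int cache_nice_tries;	/* failures tolerated before going active */
};

/*
 * One balance attempt.  'balanced' stands for find_busiest_group()
 * finding nothing to do, 'moved' for move_tasks() having migrated
 * something.  Returns true when active balancing would be kicked.
 */
static bool balance_attempt(struct sd_sketch *sd, bool balanced, bool moved)
{
	if (balanced || moved) {
		/* The point of the patch: reset the counter at every spot
		 * where the domain turns out to be balanced, not only
		 * after a successful migration. */
		sd->nr_balance_failed = 0;
		return false;
	}

	/* Nothing moved and not balanced: count the failure. */
	if (++sd->nr_balance_failed > sd->cache_nice_tries)
		return true;	/* real code would wake the migration thread here */
	return false;
}

int main(void)
{
	struct sd_sketch sd = { .nr_balance_failed = 0, .cache_nice_tries = 2 };

	/* Two failures, a balanced pass, then another failure.  With the
	 * reset, the final failure stays below the threshold; without it,
	 * the balanced pass would leave the count at 2 and the final
	 * failure would escalate. */
	const bool balanced[] = { false, false, true,  false };
	const bool moved[]    = { false, false, false, false };

	for (unsigned int i = 0; i < 4; i++)
		printf("attempt %u -> active balance: %s\n", i,
		       balance_attempt(&sd, balanced[i], moved[i]) ? "yes" : "no");
	return 0;
}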
@@ -2021,6 +2021,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 
 	schedstat_inc(sd, lb_balanced[idle]);
 
+	sd->nr_balance_failed = 0;
 	/* tune up the balancing interval */
 	if (sd->balance_interval < sd->max_interval)
 		sd->balance_interval *= 2;
@@ -2046,16 +2047,14 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 	schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
 	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
 	if (!group) {
-		schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
 		schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
-		goto out;
+		goto out_balanced;
 	}
 
 	busiest = find_busiest_queue(group);
 	if (!busiest || busiest == this_rq) {
-		schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
 		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
-		goto out;
+		goto out_balanced;
 	}
 
 	/* Attempt to move tasks */
@@ -2066,11 +2065,16 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 					imbalance, sd, NEWLY_IDLE, NULL);
 	if (!nr_moved)
 		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
+	else
+		sd->nr_balance_failed = 0;
 
 	spin_unlock(&busiest->lock);
-
-out:
 	return nr_moved;
+
+out_balanced:
+	schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
+	sd->nr_balance_failed = 0;
+	return 0;
 }
 
 /*
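Taken together, the newidle hunks reorganize load_balance_newidle() so that both "nothing to pull" exits funnel through a common out_balanced label that accounts the lb_balanced statistic and clears nr_balance_failed, and a successful pull now clears the counter as well. The following is a hedged, compilable sketch of that post-patch control flow with stand-in types and stub helpers; found_busiest() and try_move_tasks() are illustrative, not the kernel's functions.

/*
 * Hedged sketch of the post-patch shape of load_balance_newidle():
 * the balanced exits share an out_balanced label, and a successful
 * pull resets the failure counter too.  Plain userspace C with stub
 * helpers; only the control flow mirrors the diff.
 */
#include <stdio.h>
#include <stdbool.h>

struct sd_sketch {
	unsigned int lb_balanced;	/* times the domain was found balanced */
	unsigned int lb_failed;		/* attempts that moved nothing */
	unsigned int nr_balance_failed;	/* consecutive failures (drives active balancing) */
};

/* Stand-ins for find_busiest_group()/find_busiest_queue() and move_tasks(). */
static bool found_busiest(void)  { return false; }	/* pretend the domain is balanced */
static int  try_move_tasks(void) { return 0; }

static int newidle_balance(struct sd_sketch *sd)
{
	int nr_moved;

	if (!found_busiest())
		goto out_balanced;	/* before the patch this was a plain "goto out" */

	nr_moved = try_move_tasks();
	if (!nr_moved)
		sd->lb_failed++;
	else
		sd->nr_balance_failed = 0;	/* a successful pull also resets the counter */

	return nr_moved;

out_balanced:
	/* Shared balanced exit: account the event and reset the failure
	 * counter, so an idle CPU that finds nothing to pull no longer
	 * nudges the domain toward active balancing. */
	sd->lb_balanced++;
	sd->nr_balance_failed = 0;
	return 0;
}

int main(void)
{
	struct sd_sketch sd = { 0, 0, 3 };
	int moved = newidle_balance(&sd);

	printf("moved %d task(s), nr_balance_failed now %u\n",
	       moved, sd.nr_balance_failed);
	return 0;
}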