Commit 68767a0a authored by Nick Piggin, committed by Linus Torvalds

[PATCH] sched: schedstats update for balance on fork

Add SCHEDSTAT statistics for sched-balance-fork.
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 147cbb4b
...@@ -517,10 +517,16 @@ struct sched_domain { ...@@ -517,10 +517,16 @@ struct sched_domain {
unsigned long alb_failed; unsigned long alb_failed;
unsigned long alb_pushed; unsigned long alb_pushed;
/* sched_balance_exec() stats */ /* SD_BALANCE_EXEC stats */
unsigned long sbe_attempts; unsigned long sbe_cnt;
unsigned long sbe_balanced;
unsigned long sbe_pushed; unsigned long sbe_pushed;
/* SD_BALANCE_FORK stats */
unsigned long sbf_cnt;
unsigned long sbf_balanced;
unsigned long sbf_pushed;
/* try_to_wake_up() stats */ /* try_to_wake_up() stats */
unsigned long ttwu_wake_remote; unsigned long ttwu_wake_remote;
unsigned long ttwu_move_affine; unsigned long ttwu_move_affine;
......
...@@ -309,7 +309,7 @@ static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) ...@@ -309,7 +309,7 @@ static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
* bump this up when changing the output format or the meaning of an existing * bump this up when changing the output format or the meaning of an existing
* format, so that tools can adapt (or abort) * format, so that tools can adapt (or abort)
*/ */
#define SCHEDSTAT_VERSION 11 #define SCHEDSTAT_VERSION 12
static int show_schedstat(struct seq_file *seq, void *v) static int show_schedstat(struct seq_file *seq, void *v)
{ {
...@@ -356,9 +356,10 @@ static int show_schedstat(struct seq_file *seq, void *v) ...@@ -356,9 +356,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
sd->lb_nobusyq[itype], sd->lb_nobusyq[itype],
sd->lb_nobusyg[itype]); sd->lb_nobusyg[itype]);
} }
seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu\n", seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
sd->alb_cnt, sd->alb_failed, sd->alb_pushed, sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
sd->sbe_pushed, sd->sbe_attempts, sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance); sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
} }
#endif #endif
...@@ -1264,24 +1265,34 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags) ...@@ -1264,24 +1265,34 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
sd = tmp; sd = tmp;
if (sd) { if (sd) {
int new_cpu;
struct sched_group *group; struct sched_group *group;
schedstat_inc(sd, sbf_cnt);
cpu = task_cpu(p); cpu = task_cpu(p);
group = find_idlest_group(sd, p, cpu); group = find_idlest_group(sd, p, cpu);
if (group) { if (!group) {
int new_cpu; schedstat_inc(sd, sbf_balanced);
new_cpu = find_idlest_cpu(group, cpu); goto no_forkbalance;
if (new_cpu != -1 && new_cpu != cpu && }
cpu_isset(new_cpu, p->cpus_allowed)) {
set_task_cpu(p, new_cpu); new_cpu = find_idlest_cpu(group, cpu);
task_rq_unlock(rq, &flags); if (new_cpu == -1 || new_cpu == cpu) {
rq = task_rq_lock(p, &flags); schedstat_inc(sd, sbf_balanced);
cpu = task_cpu(p); goto no_forkbalance;
} }
if (cpu_isset(new_cpu, p->cpus_allowed)) {
schedstat_inc(sd, sbf_pushed);
set_task_cpu(p, new_cpu);
task_rq_unlock(rq, &flags);
rq = task_rq_lock(p, &flags);
cpu = task_cpu(p);
} }
} }
#endif
no_forkbalance:
#endif
/* /*
* We decrease the sleep average of forking parents * We decrease the sleep average of forking parents
* and children as well, to keep max-interactive tasks * and children as well, to keep max-interactive tasks
...@@ -1618,30 +1629,28 @@ void sched_exec(void) ...@@ -1618,30 +1629,28 @@ void sched_exec(void)
struct sched_domain *tmp, *sd = NULL; struct sched_domain *tmp, *sd = NULL;
int new_cpu, this_cpu = get_cpu(); int new_cpu, this_cpu = get_cpu();
/* Prefer the current CPU if there's only this task running */
if (this_rq()->nr_running <= 1)
goto out;
for_each_domain(this_cpu, tmp) for_each_domain(this_cpu, tmp)
if (tmp->flags & SD_BALANCE_EXEC) if (tmp->flags & SD_BALANCE_EXEC)
sd = tmp; sd = tmp;
if (sd) { if (sd) {
struct sched_group *group; struct sched_group *group;
schedstat_inc(sd, sbe_attempts); schedstat_inc(sd, sbe_cnt);
group = find_idlest_group(sd, current, this_cpu); group = find_idlest_group(sd, current, this_cpu);
if (!group) if (!group) {
schedstat_inc(sd, sbe_balanced);
goto out; goto out;
}
new_cpu = find_idlest_cpu(group, this_cpu); new_cpu = find_idlest_cpu(group, this_cpu);
if (new_cpu == -1) if (new_cpu == -1 || new_cpu == this_cpu) {
schedstat_inc(sd, sbe_balanced);
goto out; goto out;
if (new_cpu != this_cpu) {
schedstat_inc(sd, sbe_pushed);
put_cpu();
sched_migrate_task(current, new_cpu);
return;
} }
schedstat_inc(sd, sbe_pushed);
put_cpu();
sched_migrate_task(current, new_cpu);
return;
} }
out: out:
put_cpu(); put_cpu();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment