Commit 72a4cf20 authored by Alex Shi, committed by Ingo Molnar

sched: Change cfs_rq load avg to unsigned long

Since the 'u64 runnable_load_avg, blocked_load_avg' fields in struct
cfs_rq are no larger than the 'unsigned long' cfs_rq->load.weight, we
don't need u64 variables to describe them; unsigned long is more
efficient and more convenient.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Reviewed-by: Paul Turner <pjt@google.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1371694737-29336-10-git-send-email-alex.shi@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a003a25b
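Editor's note on the size rationale: on 64-bit Linux 'unsigned long' is already 64 bits wide, so the change costs nothing there, while on 32-bit kernels it halves the storage for these two fields and avoids 64-bit arithmetic. A minimal user-space sketch of the difference (not kernel code; the two-field structs below merely stand in for the affected part of struct cfs_rq):

/* Compare the storage cost of the old u64 pair against the new
 * unsigned long pair. On a 32-bit build the new pair is half the
 * size; on an LP64 build both print 16 bytes. */
#include <stdint.h>
#include <stdio.h>

struct load_avg_old { uint64_t runnable_load_avg, blocked_load_avg; };
struct load_avg_new { unsigned long runnable_load_avg, blocked_load_avg; };

int main(void)
{
	printf("u64 pair: %zu bytes, unsigned long pair: %zu bytes\n",
	       sizeof(struct load_avg_old), sizeof(struct load_avg_new));
	return 0;
}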
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -211,9 +211,9 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
-	SEQ_printf(m, " .%-30s: %lld\n", "runnable_load_avg",
+	SEQ_printf(m, " .%-30s: %ld\n", "runnable_load_avg",
 			cfs_rq->runnable_load_avg);
-	SEQ_printf(m, " .%-30s: %lld\n", "blocked_load_avg",
+	SEQ_printf(m, " .%-30s: %ld\n", "blocked_load_avg",
 			cfs_rq->blocked_load_avg);
 	SEQ_printf(m, " .%-30s: %lld\n", "tg_load_avg",
 			(unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
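The format-string changes above track the type change: keeping %lld with an unsigned long argument would read eight bytes of a four-byte value on 32-bit builds, and gcc's -Wformat flags the long vs. long long mismatch even where the sizes agree. A toy user-space equivalent (assumed sample value, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long runnable_load_avg = 42;	/* hypothetical sample value */

	/* %ld now matches the unsigned long field; the old %lld would
	 * no longer correspond to the argument's type. */
	printf(" .%-30s: %ld\n", "runnable_load_avg", runnable_load_avg);
	return 0;
}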
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4181,12 +4181,9 @@ static int tg_load_down(struct task_group *tg, void *data)
 	if (!tg->parent) {
 		load = cpu_rq(cpu)->avg.load_avg_contrib;
 	} else {
-		unsigned long tmp_rla;
-		tmp_rla = tg->parent->cfs_rq[cpu]->runnable_load_avg + 1;
-
 		load = tg->parent->cfs_rq[cpu]->h_load;
-		load *= tg->se[cpu]->avg.load_avg_contrib;
-		load /= tmp_rla;
+		load = div64_ul(load * tg->se[cpu]->avg.load_avg_contrib,
+				tg->parent->cfs_rq[cpu]->runnable_load_avg + 1);
 	}
 
 	tg->cfs_rq[cpu]->h_load = load;
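The switch to div64_ul() matters on 32-bit targets: now that runnable_load_avg is an unsigned long, the product 'load * load_avg_contrib' is still 64 bits wide, and an open-coded 64-bit '/' would pull in the compiler's software divide (__udivdi3), which the kernel does not link against. A user-space sketch of the semantics (the helper body and the sample values are assumptions for illustration, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's div64_ul(): divide a 64-bit dividend by
 * an unsigned long divisor. The kernel's 32-bit variant routes the
 * divide through its own helpers rather than a raw 64-bit '/'. */
static inline uint64_t div64_ul_sketch(uint64_t dividend, unsigned long divisor)
{
	return dividend / divisor;
}

int main(void)
{
	/* Hypothetical values mirroring the tg_load_down() math: the
	 * parent's h_load scaled by this entity's share of the parent's
	 * runnable load; the +1 guards against a divide by zero. */
	uint64_t h_load = 2048;		/* parent cfs_rq h_load */
	unsigned long contrib = 512;	/* se load_avg_contrib */
	unsigned long rla = 1023;	/* parent runnable_load_avg */

	printf("child h_load = %llu\n", (unsigned long long)
	       div64_ul_sketch(h_load * contrib, rla + 1));	/* 2048*512/1024 = 1024 */
	return 0;
}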
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -277,7 +277,7 @@ struct cfs_rq {
 	 * This allows for the description of both thread and group usage (in
 	 * the FAIR_GROUP_SCHED case).
 	 */
-	u64 runnable_load_avg, blocked_load_avg;
+	unsigned long runnable_load_avg, blocked_load_avg;
 	atomic64_t decay_counter, removed_load;
 	u64 last_decay;