Commit 08e2388a authored by Ingo Molnar

sched: clean up calc_weighted()

clean up calc_weighted(): we always use the normalized shift, so
there is no need to pass it in. Also, push the non-nice-0 branch
into the function.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
parent 1091985b
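
After this change, calc_weighted() folds the nice-0 fast path into the helper itself: callers simply pass the sched_entity, and the function scales the delta by load.weight relative to NICE_0_LOAD using the single normalized shift; nice-0 tasks skip the math entirely, since their ratio is exactly 1. Below is a minimal standalone sketch of that fixed-point scaling. The struct layout and constant values are illustrative assumptions for the sketch, not the kernel's exact definitions (those live in the scheduler headers):

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins: in the kernel, NICE_0_LOAD is the load
 * weight of a nice-0 task and NICE_0_SHIFT its normalized shift. */
#define NICE_0_SHIFT	10
#define NICE_0_LOAD	(1UL << NICE_0_SHIFT)

/* Pared-down stand-in for the kernel's sched_entity. */
struct sched_entity {
	struct { unsigned long weight; } load;
};

static inline unsigned long
calc_weighted(unsigned long delta, struct sched_entity *se)
{
	unsigned long weight = se->load.weight;

	/* Non-nice-0 tasks: scale the delta by weight/NICE_0_LOAD,
	 * widening to 64 bits before the multiply so the intermediate
	 * product cannot overflow an unsigned long on 32-bit. */
	if (weight != NICE_0_LOAD)
		return (uint64_t)delta * weight >> NICE_0_SHIFT;

	/* Nice-0 tasks: weight/NICE_0_LOAD == 1, no scaling needed. */
	return delta;
}

int main(void)
{
	struct sched_entity nice0 = { .load = { NICE_0_LOAD } };
	struct sched_entity heavy = { .load = { 2 * NICE_0_LOAD } };

	printf("nice-0: %lu\n", calc_weighted(1000, &nice0)); /* 1000 */
	printf("heavy:  %lu\n", calc_weighted(1000, &heavy)); /* 2000 */
	return 0;
}

Note how this mirrors the diff below: the old code had separate 32-bit and 64-bit variants (the 32-bit one capping the result at the runtime limit), while the new version relies on the widened 64-bit multiply alone and drops the cap.
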
@@ -397,27 +397,16 @@ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 }
 
-/*
- * We calculate fair deltas here, so protect against the random effects
- * of a multiplication overflow by capping it to the runtime limit:
- */
-#if BITS_PER_LONG == 32
 static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
+calc_weighted(unsigned long delta, struct sched_entity *se)
 {
-	u64 tmp = (u64)delta * weight >> shift;
+	unsigned long weight = se->load.weight;
 
-	if (unlikely(tmp > sysctl_sched_runtime_limit*2))
-		return sysctl_sched_runtime_limit*2;
-
-	return tmp;
-}
-#else
-static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
-{
-	return delta * weight >> shift;
+	if (unlikely(weight != NICE_0_LOAD))
+		return (u64)delta * se->load.weight >> NICE_0_SHIFT;
+	else
+		return delta;
 }
-#endif
 
 /*
  * Task is being enqueued - update stats:
@@ -469,9 +458,7 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	schedstat_set(se->wait_max, max(se->wait_max,
 			rq_of(cfs_rq)->clock - se->wait_start));
 
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta_fair = calc_weighted(delta_fair, se->load.weight,
-							NICE_0_SHIFT);
+	delta_fair = calc_weighted(delta_fair, se);
 
 	add_wait_runtime(cfs_rq, se, delta_fair);
 }
@@ -554,9 +541,7 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	delta_fair = div64_likely32((u64)delta_fair * load,
 				    load + se->load.weight);
 
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta_fair = calc_weighted(delta_fair, se->load.weight,
-							NICE_0_SHIFT);
+	delta_fair = calc_weighted(delta_fair, se);
 
 	prev_runtime = se->wait_runtime;
 	__add_wait_runtime(cfs_rq, se, delta_fair);