Commit 0ccb977f authored by Peter Zijlstra, committed by Ingo Molnar

sched/fair: Explicitly generate __update_load_avg() instances

The __update_load_avg() function is an __always_inline because it is
used with constant propagation to generate different variants of the
code without having to duplicate it (which would be prone to bugs).

Explicitly instantiate the 3 variants.

Note that most of this is called from rather hot paths, so reducing
branches is good.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 05b40e05
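Before the diff, here is a minimal standalone sketch of the technique the commit message relies on (hypothetical names, plain userspace C, not the scheduler code): a single forced-inline core whose branches depend on its arguments, plus explicitly instantiated wrappers that pin constant arguments so the compiler can fold the corresponding branches out of each generated instance.

/* Minimal sketch of constant propagation through an always-inline core.
 * Hypothetical names; build with: gcc -O2 demo.c -o demo
 */
#include <stddef.h>
#include <stdio.h>

struct grp { unsigned long load; };
struct avg { unsigned long sum; };

/*
 * Core helper: forced inline so that constant arguments at a call site
 * (weight == 0, running == 0, g == NULL) are propagated and the matching
 * branches vanish from that particular instance.
 */
static inline __attribute__((__always_inline__)) int
___update(struct avg *a, unsigned long weight, int running, struct grp *g)
{
	if (weight)		/* dropped where weight is the constant 0 */
		a->sum += weight;
	if (running)		/* dropped where running is the constant 0 */
		a->sum += 1;
	if (g)			/* dropped where g is the constant NULL */
		g->load = a->sum;
	return 1;
}

/* Explicit instances, loosely mirroring the blocked-se / se / cfs_rq split. */
static int update_blocked(struct avg *a)
{
	return ___update(a, 0, 0, NULL);	/* all three branches fold away */
}

static int update_entity(struct avg *a, unsigned long w, int r)
{
	return ___update(a, w, r, NULL);	/* only the NULL check folds */
}

static int update_group(struct avg *a, unsigned long w, int r, struct grp *g)
{
	return ___update(a, w, r, g);		/* runtime arguments keep their branches */
}

int main(void)
{
	struct avg a = { 0 };
	struct grp g = { 0 };

	update_blocked(&a);		/* no-op by construction */
	update_entity(&a, 16, 1);	/* sum = 17 */
	update_group(&a, 8, 1, &g);	/* sum = 26, g.load = 26 */
	printf("sum=%lu load=%lu\n", a.sum, g.load);
	return 0;
}

The kernel patch below gets the same effect: for example, __update_load_avg_blocked_se() pins weight, running and cfs_rq to 0, 0 and NULL, so those paths disappear from that instance of ___update_load_avg().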
kernel/sched/fair.c

@@ -2849,7 +2849,7 @@ static u32 __compute_runnable_contrib(u64 n)
  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
  */
 static __always_inline int
-__update_load_avg(u64 now, int cpu, struct sched_avg *sa,
+___update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 		  unsigned long weight, int running, struct cfs_rq *cfs_rq)
 {
 	u64 delta, scaled_delta, periods;
@@ -2953,6 +2953,28 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 	return decayed;
 }
 
+static int
+__update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
+{
+	return ___update_load_avg(now, cpu, &se->avg, 0, 0, NULL);
+}
+
+static int
+__update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	return ___update_load_avg(now, cpu, &se->avg,
+				  se->on_rq * scale_load_down(se->load.weight),
+				  cfs_rq->curr == se, NULL);
+}
+
+static int
+__update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
+{
+	return ___update_load_avg(now, cpu, &cfs_rq->avg,
+			scale_load_down(cfs_rq->load.weight),
+			cfs_rq->curr != NULL, cfs_rq);
+}
+
 /*
  * Signed add and clamp on underflow.
  *
@@ -3014,6 +3036,9 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
 void set_task_rq_fair(struct sched_entity *se,
 		      struct cfs_rq *prev, struct cfs_rq *next)
 {
+	u64 p_last_update_time;
+	u64 n_last_update_time;
+
 	if (!sched_feat(ATTACH_AGE_LOAD))
 		return;
 
@@ -3024,11 +3049,11 @@ void set_task_rq_fair(struct sched_entity *se,
 	 * time. This will result in the wakee task is less decayed, but giving
 	 * the wakee more load sounds not bad.
 	 */
-	if (se->avg.last_update_time && prev) {
-		u64 p_last_update_time;
-		u64 n_last_update_time;
+	if (!(se->avg.last_update_time && prev))
+		return;
 
 #ifndef CONFIG_64BIT
+	{
 		u64 p_last_update_time_copy;
 		u64 n_last_update_time_copy;
 
@@ -3043,14 +3068,13 @@ void set_task_rq_fair(struct sched_entity *se,
 		} while (p_last_update_time != p_last_update_time_copy ||
 			 n_last_update_time != n_last_update_time_copy);
+	}
 #else
 	p_last_update_time = prev->avg.last_update_time;
 	n_last_update_time = next->avg.last_update_time;
 #endif
-	__update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
-			  &se->avg, 0, 0, NULL);
+	__update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se);
 	se->avg.last_update_time = n_last_update_time;
-	}
 }
 
 /* Take into account change of utilization of a child task group */
@@ -3295,8 +3319,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 		set_tg_cfs_propagate(cfs_rq);
 	}
 
-	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
-		scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
+	decayed = __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq);
 
 #ifndef CONFIG_64BIT
 	smp_wmb();
@@ -3328,11 +3351,8 @@ static inline void update_load_avg(struct sched_entity *se, int flags)
 	 * Track task load average for carrying it to new CPU after migrated, and
 	 * track group sched_entity load average for task_h_load calc in migration
 	 */
-	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {
-		__update_load_avg(now, cpu, &se->avg,
-			  se->on_rq * scale_load_down(se->load.weight),
-			  cfs_rq->curr == se, NULL);
-	}
+	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
+		__update_load_avg_se(now, cpu, cfs_rq, se);
 
 	decayed  = update_cfs_rq_load_avg(now, cfs_rq, true);
 	decayed |= propagate_entity_load_avg(se);
@@ -3437,7 +3457,7 @@ void sync_entity_load_avg(struct sched_entity *se)
 	u64 last_update_time;
 
 	last_update_time = cfs_rq_last_update_time(cfs_rq);
-	__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
+	__update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se);
 }
 
 /*