Commit 630246a0 authored by Quentin Perret, committed by Ingo Molnar

sched/fair: Clean-up update_sg_lb_stats parameters

In preparation for the introduction of a new root domain flag which can
be set during load balance (the 'overutilized' flag), clean up the set
of parameters passed to update_sg_lb_stats(). More specifically, the
'local_group' and 'load_idx' parameters can be removed since they can
easily be reconstructed from within the function.

While at it, transform the 'overload' parameter into a flag stored in
the 'sg_status' parameter, which makes it easier to define new flags
when needed.
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Suggested-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: adharmap@codeaurora.org
Cc: chris.redpath@arm.com
Cc: currojerez@riseup.net
Cc: dietmar.eggemann@arm.com
Cc: edubezval@gmail.com
Cc: gregkh@linuxfoundation.org
Cc: javi.merino@kernel.org
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: morten.rasmussen@arm.com
Cc: patrick.bellasi@arm.com
Cc: pkondeti@codeaurora.org
Cc: rjw@rjwysocki.net
Cc: skannan@codeaurora.org
Cc: smuckle@google.com
Cc: srinivas.pandruvada@linux.intel.com
Cc: thara.gopinath@linaro.org
Cc: tkjos@google.com
Cc: vincent.guittot@linaro.org
Cc: viresh.kumar@linaro.org
Link: https://lkml.kernel.org/r/20181203095628.11858-12-quentin.perret@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1f74de87
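
Before the diff itself, a minimal standalone C sketch of the change described
above: the boolean 'overload' out-parameter becomes one bit in an integer
status word, so further flags can be added later without touching the
function signature again. The names below (update_stats, group_sample) are
illustrative only and are not the kernel's code.

	/*
	 * Sketch of the bool -> bitmask conversion (illustrative names,
	 * not kernel code). The callee reports conditions by OR-ing bits
	 * into a single int supplied by the caller.
	 */
	#include <stdio.h>

	#define SG_OVERLOAD	0x1	/* more than one runnable task on a CPU */
	/* A future flag would simply be the next bit, e.g. 0x2. */

	struct group_sample {
		int nr_running;
	};

	/* Before: 'bool *overload' could only ever report this one fact. */
	static void update_stats(const struct group_sample *g, int *sg_status)
	{
		if (g->nr_running > 1)
			*sg_status |= SG_OVERLOAD;	/* replaces '*overload = true' */
	}

	int main(void)
	{
		struct group_sample groups[] = { { .nr_running = 1 }, { .nr_running = 3 } };
		int sg_status = 0;	/* replaces 'bool overload = false' */
		unsigned int i;

		for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++)
			update_stats(&groups[i], &sg_status);

		/* The caller tests individual bits, as the root-domain update does. */
		printf("overloaded: %d\n", !!(sg_status & SG_OVERLOAD));
		return 0;
	}
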
kernel/sched/fair.c

@@ -7905,16 +7905,16 @@ static bool update_nohz_stats(struct rq *rq, bool force)
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
  * @group: sched_group whose statistics are to be updated.
- * @load_idx: Load index of sched_domain of this_cpu for load calc.
- * @local_group: Does group contain this_cpu.
  * @sgs: variable to hold the statistics for this group.
- * @overload: Indicate pullable load (e.g. >1 runnable task).
+ * @sg_status: Holds flag indicating the status of the sched_group
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
-			struct sched_group *group, int load_idx,
-			int local_group, struct sg_lb_stats *sgs,
-			bool *overload)
+			struct sched_group *group,
+			struct sg_lb_stats *sgs,
+			int *sg_status)
 {
+	int local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
+	int load_idx = get_sd_load_idx(env->sd, env->idle);
 	unsigned long load;
 	int i, nr_running;
@@ -7938,7 +7938,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		nr_running = rq->nr_running;
 		if (nr_running > 1)
-			*overload = true;
+			*sg_status |= SG_OVERLOAD;
 
 #ifdef CONFIG_NUMA_BALANCING
 		sgs->nr_numa_running += rq->nr_numa_running;
@@ -7954,7 +7954,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
 		    sgs->group_misfit_task_load < rq->misfit_task_load) {
 			sgs->group_misfit_task_load = rq->misfit_task_load;
-			*overload = 1;
+			*sg_status |= SG_OVERLOAD;
 		}
 	}
@@ -8099,17 +8099,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 	struct sched_group *sg = env->sd->groups;
 	struct sg_lb_stats *local = &sds->local_stat;
 	struct sg_lb_stats tmp_sgs;
-	int load_idx;
-	bool overload = false;
 	bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
+	int sg_status = 0;
 
 #ifdef CONFIG_NO_HZ_COMMON
 	if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
 		env->flags |= LBF_NOHZ_STATS;
 #endif
 
-	load_idx = get_sd_load_idx(env->sd, env->idle);
-
 	do {
 		struct sg_lb_stats *sgs = &tmp_sgs;
 		int local_group;
@@ -8124,8 +8121,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 			update_group_capacity(env->sd, env->dst_cpu);
 		}
 
-		update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
-						&overload);
+		update_sg_lb_stats(env, sg, sgs, &sg_status);
 
 		if (local_group)
 			goto next_group;
@@ -8175,8 +8171,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 	if (!env->sd->parent) {
 		/* update overload indicator if we are at root domain */
-		if (READ_ONCE(env->dst_rq->rd->overload) != overload)
-			WRITE_ONCE(env->dst_rq->rd->overload, overload);
+		WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
 	}
 }

kernel/sched/sched.h

@@ -716,6 +716,9 @@ struct perf_domain {
 	struct rcu_head rcu;
 };
 
+/* Scheduling group status flags */
+#define SG_OVERLOAD		0x1 /* More than one runnable task on a CPU. */
+
 /*
  * We add the notion of a root-domain which will be used to define per-domain
  * variables. Each exclusive cpuset essentially defines an island domain by