Commit 039ae8bc authored by Vincent Guittot, committed by Ingo Molnar

sched/fair: Fix O(nr_cgroups) in the load balancing path

This re-applies the commit reverted here:

  commit c40f7d74 ("sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f654")

I.e. now that a cfs_rq can safely be added to and removed from the list, we can re-apply:

 commit a9e7f654 ("sched/fair: Fix O(nr_cgroups) in load balance path")
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: sargun@sargun.me
Cc: tj@kernel.org
Cc: xiexiuqi@huawei.com
Cc: xiezhipeng1@huawei.com
Link: https://lkml.kernel.org/r/1549469662-13614-3-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 31bc6aea
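
A quick illustration of the list-walking idiom the patch below relies on: the leaf cfs_rq walk is switched from list_for_each_entry_rcu() to the "safe" variant so that the loop body may unlink the entry it is currently visiting. The stand-alone sketch below is plain user-space C with made-up names (struct leaf, load, fully-decayed entries); it mimics the idiom and is not kernel code. It shows why the next pointer must be cached before the current node is removed.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a per-cgroup runqueue entry. */
struct leaf {
	int load;                       /* zero means "fully decayed" */
	struct leaf *prev, *next;
};

/* Circular doubly-linked list with a dummy head, in the spirit of list_head. */
static void list_add(struct leaf *head, struct leaf *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del(struct leaf *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = NULL;
}

int main(void)
{
	struct leaf head = { .prev = &head, .next = &head };
	int loads[] = { 0, 3, 0, 7 };

	for (unsigned i = 0; i < sizeof(loads) / sizeof(loads[0]); i++) {
		struct leaf *n = malloc(sizeof(*n));
		n->load = loads[i];
		list_add(&head, n);
	}

	/*
	 * "Safe" walk: remember the next element before the body runs, so the
	 * body may unlink and free the current one -- the same reason the
	 * patch uses list_for_each_entry_safe() rather than the plain/RCU
	 * iterators when it starts deleting entries mid-walk.
	 */
	for (struct leaf *pos = head.next, *tmp = pos->next;
	     pos != &head;
	     pos = tmp, tmp = pos->next) {
		if (pos->load == 0) {   /* fully decayed: drop it from the list */
			list_del(pos);
			free(pos);
		}
	}

	for (struct leaf *pos = head.next; pos != &head; pos = pos->next)
		printf("remaining load: %d\n", pos->load);

	return 0;
}

The same shape appears in update_blocked_averages() below: the body may call list_del_leaf_cfs_rq() on the cfs_rq it is visiting because for_each_leaf_cfs_rq_safe() has already remembered the next entry in pos.
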
kernel/sched/fair.c

@@ -368,9 +368,10 @@ static inline void assert_list_leaf_cfs_rq(struct rq *rq)
 	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
 }
 
-/* Iterate through all cfs_rq's on a runqueue in bottom-up order */
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+/* Iterate thr' all leaf cfs_rq's on a runqueue */
+#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
+	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
+				 leaf_cfs_rq_list)
 
 /* Do the two (enqueued) entities belong to the same group ? */
 static inline struct cfs_rq *
@@ -461,8 +462,8 @@ static inline void assert_list_leaf_cfs_rq(struct rq *rq)
 {
 }
 
-#define for_each_leaf_cfs_rq(rq, cfs_rq)	\
-		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
+		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
 
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
 {
@@ -7702,10 +7703,27 @@ static inline bool others_have_blocked(struct rq *rq)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load.weight)
+		return false;
+
+	if (cfs_rq->avg.load_sum)
+		return false;
+
+	if (cfs_rq->avg.util_sum)
+		return false;
+
+	if (cfs_rq->avg.runnable_load_sum)
+		return false;
+
+	return true;
+}
+
 static void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct cfs_rq *cfs_rq;
+	struct cfs_rq *cfs_rq, *pos;
 	const struct sched_class *curr_class;
 	struct rq_flags rf;
 	bool done = true;
@@ -7717,7 +7735,7 @@ static void update_blocked_averages(int cpu)
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
 	 */
-	for_each_leaf_cfs_rq(rq, cfs_rq) {
+	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
 		struct sched_entity *se;
 
 		if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq))
@@ -7728,6 +7746,13 @@ static void update_blocked_averages(int cpu)
 		if (se && !skip_blocked_update(se))
 			update_load_avg(cfs_rq_of(se), se, 0);
 
+		/*
+		 * There can be a lot of idle CPU cgroups.  Don't let fully
+		 * decayed cfs_rqs linger on the list.
+		 */
+		if (cfs_rq_is_decayed(cfs_rq))
+			list_del_leaf_cfs_rq(cfs_rq);
+
 		/* Don't need periodic decay once load/util_avg are null */
 		if (cfs_rq_has_blocked(cfs_rq))
 			done = false;
@@ -10609,10 +10634,10 @@ const struct sched_class fair_sched_class = {
 #ifdef CONFIG_SCHED_DEBUG
 void print_cfs_stats(struct seq_file *m, int cpu)
 {
-	struct cfs_rq *cfs_rq;
+	struct cfs_rq *cfs_rq, *pos;
 
 	rcu_read_lock();
-	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
+	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
 		print_cfs_rq(m, cpu, cfs_rq);
 	rcu_read_unlock();
 }
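
A note on the !CONFIG_FAIR_GROUP_SCHED variant of the macro in the second hunk: without group scheduling only the root cfs_rq exists, so the "loop" is shaped to run its body exactly once, and pos is there only to satisfy the macro signature. A minimal sketch of that degenerate loop (hypothetical struct definitions, not kernel code):

#include <stdio.h>

struct cfs_rq { int dummy; };           /* hypothetical stand-in */
struct rq { struct cfs_rq cfs; };       /* only the root cfs_rq exists */

/* Same shape as the !CONFIG_FAIR_GROUP_SCHED macro in the hunk above. */
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
	for (cfs_rq = &(rq)->cfs, pos = NULL; cfs_rq; cfs_rq = pos)

int main(void)
{
	struct rq rq = { .cfs = { .dummy = 42 } };
	struct cfs_rq *cfs_rq, *pos;

	/* The body runs once: cfs_rq starts at &rq.cfs, then takes pos (NULL). */
	for_each_leaf_cfs_rq_safe(&rq, cfs_rq, pos)
		printf("visited the root cfs_rq, dummy = %d\n", cfs_rq->dummy);

	return 0;
}

After the first pass cfs_rq becomes pos (NULL), so the loop condition fails on the second test and the body is never re-entered.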