Commit a9e7f654 authored by Tejun Heo, committed by Ingo Molnar

sched/fair: Fix O(nr_cgroups) in load balance path

Currently, rq->leaf_cfs_rq_list is a traversal-ordered list of all
live cfs_rqs that have ever been active on the CPU; unfortunately,
this makes update_blocked_averages() O(nr_cgroups), which isn't
scalable at all.
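
Schematically, the pre-patch walk has the shape sketched below (a
simplified sketch, not the verbatim kernel code; the throttling check
and load propagation to parent entities are elided). The cost of each
call is proportional to the length of rq->leaf_cfs_rq_list, and before
this patch that list only ever grew:

	static void update_blocked_averages(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);
		struct cfs_rq *cfs_rq;
		struct rq_flags rf;

		rq_lock_irqsave(rq, &rf);
		update_rq_clock(rq);

		/*
		 * Every cfs_rq ever activated on this CPU is still on the
		 * list, so this loop is O(nr_cgroups) even when almost all
		 * of them are idle and fully decayed.
		 */
		for_each_leaf_cfs_rq(rq, cfs_rq)
			update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq),
					       cfs_rq, true);

		rq_unlock_irqrestore(rq, &rf);
	}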

This shows up as a small increase in CPU consumption and scheduling
latency in the load-balancing path on systems with the CPU controller
enabled on most cgroups.  In an edge case where temporary cgroups
were leaking, this caused the kernel to consume several tens of
percent of CPU cycles running update_blocked_averages(), with each
run taking multiple milliseconds.

This patch fixes the issue by taking empty and fully decayed cfs_rqs
off the rq->leaf_cfs_rq_list.
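
Because entries are now deleted while the list is being walked, the
iterator also changes from the RCU list walk to the _safe variant,
which caches the next entry before the loop body runs. As a minimal
fragment using the names introduced by the patch:

	struct cfs_rq *cfs_rq, *pos;

	/*
	 * 'pos' already points at the next entry when the body runs,
	 * so deleting 'cfs_rq' from the list here is safe.
	 */
	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
		if (cfs_rq_is_decayed(cfs_rq))
			list_del_leaf_cfs_rq(cfs_rq);
	}
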
Signed-off-by: Tejun Heo <tj@kernel.org>
[ Added cfs_rq_is_decayed() ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Chris Mason <clm@fb.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Turner <pjt@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20170426004350.GB3222@wtj.duckdns.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 502ce005
kernel/sched/fair.c
@@ -369,8 +369,9 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 }
 
 /* Iterate thr' all leaf cfs_rq's on a runqueue */
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
+	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
+				 leaf_cfs_rq_list)
 
 /* Do the two (enqueued) entities belong to the same group ? */
 static inline struct cfs_rq *
@@ -463,8 +464,8 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
 }
 
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-	for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
+	for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
 
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
 {
@@ -6953,10 +6954,28 @@ static void attach_tasks(struct lb_env *env)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load.weight)
+		return false;
+
+	if (cfs_rq->avg.load_sum)
+		return false;
+
+	if (cfs_rq->avg.util_sum)
+		return false;
+
+	if (cfs_rq->runnable_load_sum)
+		return false;
+
+	return true;
+}
+
 static void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct cfs_rq *cfs_rq;
+	struct cfs_rq *cfs_rq, *pos;
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
@@ -6966,7 +6985,7 @@ static void update_blocked_averages(int cpu)
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
 	 */
-	for_each_leaf_cfs_rq(rq, cfs_rq) {
+	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
 		struct sched_entity *se;
 
 		/* throttled entities do not contribute to load */
@@ -6980,6 +6999,13 @@ static void update_blocked_averages(int cpu)
 		se = cfs_rq->tg->se[cpu];
 		if (se && !skip_blocked_update(se))
 			update_load_avg(se, 0);
+
+		/*
+		 * There can be a lot of idle CPU cgroups.  Don't let fully
+		 * decayed cfs_rqs linger on the list.
+		 */
+		if (cfs_rq_is_decayed(cfs_rq))
+			list_del_leaf_cfs_rq(cfs_rq);
 	}
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -9503,10 +9529,10 @@ const struct sched_class fair_sched_class = {
 #ifdef CONFIG_SCHED_DEBUG
 void print_cfs_stats(struct seq_file *m, int cpu)
 {
-	struct cfs_rq *cfs_rq;
+	struct cfs_rq *cfs_rq, *pos;
 
 	rcu_read_lock();
-	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
+	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
 		print_cfs_rq(m, cpu, cfs_rq);
 	rcu_read_unlock();
 }
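
Note that removal from the list is not permanent: list_add_leaf_cfs_rq()
already guards insertion with cfs_rq->on_list, and list_del_leaf_cfs_rq()
clears that flag, so a cfs_rq taken off the list here is simply re-added
the next time an entity is enqueued on it. The list therefore shrinks to
the set of cfs_rqs that actually carry load.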