Commit 889cb3b9 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Smaller fixlets"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Fix kernel-doc warnings in kernel/sched/fair.c
  sched: Unthrottle rt runqueues in __disable_runtime()
  sched: Add missing call to calc_load_exit_idle()
  sched: Fix load avg vs cpu-hotplug
parents 7ef6e973 9450d57e
@@ -5304,27 +5304,17 @@ void idle_task_exit(void)
 }
 
 /*
- * While a dead CPU has no uninterruptible tasks queued at this point,
- * it might still have a nonzero ->nr_uninterruptible counter, because
- * for performance reasons the counter is not stricly tracking tasks to
- * their home CPUs. So we just add the counter to another CPU's counter,
- * to keep the global sum constant after CPU-down:
- */
-static void migrate_nr_uninterruptible(struct rq *rq_src)
-{
-	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
-
-	rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
-	rq_src->nr_uninterruptible = 0;
-}
-
-/*
- * remove the tasks which were accounted by rq from calc_load_tasks.
+ * Since this CPU is going 'away' for a while, fold any nr_active delta
+ * we might have. Assumes we're called after migrate_tasks() so that the
+ * nr_active count is stable.
+ *
+ * Also see the comment "Global load-average calculations".
  */
-static void calc_global_load_remove(struct rq *rq)
+static void calc_load_migrate(struct rq *rq)
 {
-	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
-	rq->calc_load_active = 0;
+	long delta = calc_load_fold_active(rq);
+	if (delta)
+		atomic_long_add(delta, &calc_load_tasks);
 }
 
 /*
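Note: the replaced pair migrate_nr_uninterruptible() + calc_global_load_remove() kept the global sums constant by shuffling raw counters between CPUs; calc_load_migrate() instead folds the outgoing CPU's whole nr_active delta into calc_load_tasks once, after migrate_tasks() has emptied the rq. A minimal standalone sketch of that folding, with a simplified struct and a plain long in place of atomic_long_t (illustrative types only, not the kernel's):

#include <stdio.h>

/* Simplified stand-in for struct rq: only the load-accounting fields. */
struct rq_model {
	long nr_running;         /* runnable tasks on this CPU */
	long nr_uninterruptible; /* D-state tasks charged to this CPU */
	long calc_load_active;   /* contribution already reported globally */
};

static long calc_load_tasks; /* global sum sampled by the loadavg code */

/* Report only the change since the last fold, roughly what
 * calc_load_fold_active() does. */
static long calc_load_fold_active(struct rq_model *rq)
{
	long nr_active = rq->nr_running + rq->nr_uninterruptible;
	long delta = 0;

	if (nr_active != rq->calc_load_active) {
		delta = nr_active - rq->calc_load_active;
		rq->calc_load_active = nr_active;
	}
	return delta;
}

/* The shape of calc_load_migrate(): fold the dying CPU's delta exactly once. */
static void calc_load_migrate(struct rq_model *rq)
{
	long delta = calc_load_fold_active(rq);

	if (delta)
		calc_load_tasks += delta; /* atomic_long_add() in the kernel */
}

int main(void)
{
	struct rq_model rq = { .nr_running = 1, .nr_uninterruptible = 2 };

	calc_load_migrate(&rq);
	printf("calc_load_tasks = %ld\n", calc_load_tasks); /* prints 3 */
	return 0;
}

Because the delta is computed against calc_load_active, a CPU whose activity was already folded contributes nothing extra, which is what keeps the global sum consistent across a hotplug.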
@@ -5352,9 +5342,6 @@ static void migrate_tasks(unsigned int dead_cpu)
 	 */
 	rq->stop = NULL;
 
-	/* Ensure any throttled groups are reachable by pick_next_task */
-	unthrottle_offline_cfs_rqs(rq);
-
 	for ( ; ; ) {
 		/*
 		 * There's this thread running, bail when that's the only
@@ -5618,8 +5605,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		BUG_ON(rq->nr_running != 1); /* the migration thread */
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 
-		migrate_nr_uninterruptible(rq);
-		calc_global_load_remove(rq);
+		calc_load_migrate(rq);
 		break;
 #endif
 	}
...
@@ -2052,7 +2052,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void unthrottle_offline_cfs_rqs(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq;
@@ -2106,7 +2106,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 	return NULL;
 }
 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 
 #endif /* CONFIG_CFS_BANDWIDTH */
@@ -3658,7 +3658,6 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  * @group: sched_group whose statistics are to be updated.
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
- * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sgs: variable to hold the statistics for this group.
  */
@@ -3805,7 +3804,6 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @env: The load balancing environment.
- * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
  */
@@ -4956,6 +4954,9 @@ static void rq_online_fair(struct rq *rq)
 static void rq_offline_fair(struct rq *rq)
 {
 	update_sysctl();
+
+	/* Ensure any throttled groups are reachable by pick_next_task */
+	unthrottle_offline_cfs_rqs(rq);
 }
 
 #endif /* CONFIG_SMP */
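With the unthrottle moved out of migrate_tasks() and into rq_offline_fair(), the CFS-bandwidth cleanup now runs from the fair class's own offline hook, which is why unthrottle_offline_cfs_rqs() can become static above and its extern declaration disappears from the scheduler's shared header below. A rough standalone sketch of that callback-dispatch pattern, using stub names (rq_stub, sched_class_stub, set_rq_offline_stub) rather than the kernel's real sched_class plumbing:

#include <stdio.h>

struct rq_stub; /* opaque runqueue stand-in */

/* Per-class hook table; only the offline hook is modelled here. */
struct sched_class_stub {
	const char *name;
	void (*rq_offline)(struct rq_stub *rq);
};

static void rq_offline_rt_stub(struct rq_stub *rq)
{
	(void)rq;
	printf("rt: __disable_runtime()\n");
}

static void rq_offline_fair_stub(struct rq_stub *rq)
{
	(void)rq;
	/* fair.c now cleans up its own throttled cfs_rqs from here */
	printf("fair: unthrottle_offline_cfs_rqs()\n");
}

static const struct sched_class_stub classes[] = {
	{ "rt",   rq_offline_rt_stub },
	{ "fair", rq_offline_fair_stub },
};

/* Core hotplug code just walks the classes; it no longer needs to know
 * anything about CFS-bandwidth internals. */
static void set_rq_offline_stub(struct rq_stub *rq)
{
	for (unsigned int i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
		if (classes[i].rq_offline)
			classes[i].rq_offline(rq);
}

int main(void)
{
	set_rq_offline_stub(NULL);
	return 0;
}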
...
@@ -691,6 +691,7 @@ static void __disable_runtime(struct rq *rq)
 		 * runtime - in which case borrowing doesn't make sense.
 		 */
 		rt_rq->rt_runtime = RUNTIME_INF;
+		rt_rq->rt_throttled = 0;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		raw_spin_unlock(&rt_b->rt_runtime_lock);
 	}
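The added rt_throttled = 0 matters because __disable_runtime() runs while a CPU is being taken offline: making rt_runtime effectively infinite is not enough if the rt_rq was already marked throttled, since a throttled group keeps its tasks hidden from pick_next_task() and they can then never be migrated off the dead CPU. A tiny standalone model of that stuck state (simplified fields and hypothetical helper names, not the kernel's throttling code):

#include <stdio.h>

#define RUNTIME_INF (~0ULL)

/* Minimal stand-in for the rt_rq bandwidth state involved in the fix. */
struct rt_rq_model {
	unsigned long long rt_runtime;
	int rt_throttled;
};

/* A throttled group contributes nothing for pick_next_task() to choose. */
static int rt_rq_has_runnable(const struct rt_rq_model *rt_rq)
{
	return !rt_rq->rt_throttled;
}

static void disable_runtime(struct rt_rq_model *rt_rq, int apply_fix)
{
	rt_rq->rt_runtime = RUNTIME_INF; /* borrowing no longer makes sense */
	if (apply_fix)
		rt_rq->rt_throttled = 0; /* the fix: also lift the throttle */
}

int main(void)
{
	struct rt_rq_model old_way = { .rt_runtime = 950000, .rt_throttled = 1 };
	struct rt_rq_model new_way = old_way;

	disable_runtime(&old_way, 0);
	disable_runtime(&new_way, 1);

	printf("without fix: runnable=%d, with fix: runnable=%d\n",
	       rt_rq_has_runnable(&old_way), rt_rq_has_runnable(&new_way));
	return 0;
}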
...
@@ -1144,7 +1144,6 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
-extern void unthrottle_offline_cfs_rqs(struct rq *rq);
 
 extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
...
@@ -573,6 +573,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 	tick_do_update_jiffies64(now);
 	update_cpu_load_nohz();
 
+	calc_load_exit_idle();
 	touch_softlockup_watchdog();
 	/*
 	 * Cancel the scheduled timer and restore the tick
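The NO_HZ idle-enter path already marks the CPU idle for load-average purposes via calc_load_enter_idle(); this hunk adds the missing counterpart on the tick-restart path so the load-average code knows the CPU is ticking again (otherwise long-idle CPUs could skew the reported loadavg). A toy model of just that enter/exit pairing, with stub functions rather than the kernel's sampling-window arithmetic:

#include <assert.h>
#include <stdio.h>

/* Toy pairing model: every NO_HZ idle period must be closed by
 * calc_load_exit_idle() before normal per-tick load folding resumes.
 * Only the shape of the fix is shown; the real code adjusts the
 * per-CPU sampling window instead of a simple flag. */
static int nohz_load_idle;

static void calc_load_enter_idle(void) { nohz_load_idle = 1; }
static void calc_load_exit_idle(void)  { nohz_load_idle = 0; }

/* Periodic load sampling assumes the CPU is back to normal ticking. */
static void fold_this_cpu_load(void)
{
	assert(!nohz_load_idle && "sampling while still marked NO_HZ idle");
	printf("folded this CPU's delta into the global load count\n");
}

static void tick_nohz_stop_sched_tick_stub(void)
{
	calc_load_enter_idle(); /* CPU goes NO_HZ idle */
}

static void tick_nohz_restart_sched_tick_stub(void)
{
	calc_load_exit_idle(); /* the call this hunk adds */
}

int main(void)
{
	tick_nohz_stop_sched_tick_stub();
	tick_nohz_restart_sched_tick_stub();
	fold_this_cpu_load();
	return 0;
}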
...