Commit f213a6c8 authored by Linus Torvalds

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle were:

   - fix affine wakeups (Peter Zijlstra)

   - improve CPU onlining (and general bootup) scalability on systems
     with a ridiculous number (thousands) of CPUs (Peter Zijlstra)

   - sched/numa updates (Rik van Riel)

   - sched/deadline updates (Byungchul Park)

   - sched/cpufreq enhancements and related cleanups (Viresh Kumar)

   - sched/debug enhancements (Xie XiuQi)

   - various fixes"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  sched/debug: Optimize sched_domain sysctl generation
  sched/topology: Avoid pointless rebuild
  sched/topology, cpuset: Avoid spurious/wrong domain rebuilds
  sched/topology: Improve comments
  sched/topology: Fix memory leak in __sdt_alloc()
  sched/completion: Document that reinit_completion() must be called after complete_all()
  sched/autogroup: Fix error reporting printk text in autogroup_create()
  sched/fair: Fix wake_affine() for !NUMA_BALANCING
  sched/debug: Introduce task_state_to_char() helper function
  sched/debug: Show task state in /proc/sched_debug
  sched/debug: Use task_pid_nr_ns in /proc/$pid/sched
  sched/core: Remove unnecessary initialization init_idle_bootup_task()
  sched/deadline: Change return value of cpudl_find()
  sched/deadline: Make find_later_rq() choose a closer CPU in topology
  sched/numa: Scale scan period with tasks in group and shared/private
  sched/numa: Slow down scan rate if shared faults dominate
  sched/pelt: Fix false running accounting
  sched: Mark pick_next_task_dl() and build_sched_domain() as static
  sched/cpupri: Don't re-initialize 'struct cpupri'
  sched/deadline: Don't re-initialize 'struct cpudl'
  ...
parents 621bee34 bbdacdfe
@@ -75,12 +75,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
 extern void setup_node_to_cpumask_map(void);
-/*
- * Returns the number of the node containing Node 'node'. This
- * architecture is flat, so it is a pretty simple function!
- */
-#define parent_node(node) (node)
 #define pcibus_to_node(bus) __pcibus_to_node(bus)
 extern int __node_distance(int, int);
...
@@ -1408,12 +1408,13 @@ static const struct file_operations proc_fail_nth_operations = {
 static int sched_show(struct seq_file *m, void *v)
 {
        struct inode *inode = m->private;
+       struct pid_namespace *ns = inode->i_sb->s_fs_info;
        struct task_struct *p;
        p = get_proc_task(inode);
        if (!p)
                return -ESRCH;
-       proc_sched_show_task(p, m);
+       proc_sched_show_task(p, ns, m);
        put_task_struct(p);
...
@@ -1233,6 +1233,19 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
        return task_pgrp_nr_ns(tsk, &init_pid_ns);
 }
+static inline char task_state_to_char(struct task_struct *task)
+{
+       const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
+       unsigned long state = task->state;
+
+       state = state ? __ffs(state) + 1 : 0;
+
+       /* Make sure the string lines up properly with the number of task states: */
+       BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1);
+
+       return state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?';
+}
 /**
  * is_global_init - check if a task structure is init. Since init
  * is free to have sub-threads we need to check tgid.
...
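[Editor's aside] For reference, a small userspace sketch of the index math the new task_state_to_char() helper performs (illustrative only, not kernel code; the state values and the "RSD..." prefix of TASK_STATE_TO_CHAR_STR are assumptions based on the usual definitions):

/* Illustrative stand-in for the kernel helper above. */
#include <stdio.h>
#include <strings.h>                            /* ffs(): 1-based, like __ffs()+1 */

static char state_to_char(unsigned long state)
{
        const char stat_nam[] = "RSDTtXZ";      /* assumed prefix of TASK_STATE_TO_CHAR_STR */
        unsigned long idx = state ? ffs(state) : 0;

        return idx < sizeof(stat_nam) - 1 ? stat_nam[idx] : '?';
}

int main(void)
{
        /* 0x0 TASK_RUNNING -> 'R', 0x1 TASK_INTERRUPTIBLE -> 'S', 0x2 TASK_UNINTERRUPTIBLE -> 'D' */
        printf("%c %c %c\n", state_to_char(0x0), state_to_char(0x1), state_to_char(0x2));
        return 0;
}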
@@ -6,6 +6,7 @@
  */
 struct task_struct;
+struct pid_namespace;
 extern void dump_cpu_task(int cpu);
@@ -34,7 +35,8 @@ extern void sched_show_task(struct task_struct *p);
 #ifdef CONFIG_SCHED_DEBUG
 struct seq_file;
-extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_show_task(struct task_struct *p,
+                                struct pid_namespace *ns, struct seq_file *m);
 extern void proc_sched_set_task(struct task_struct *p);
 #endif
...
@@ -30,7 +30,6 @@ extern int lockdep_tasklist_lock_is_held(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
-extern void init_idle_bootup_task(struct task_struct *idle);
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
...
@@ -71,6 +71,14 @@ struct sched_domain_shared {
        atomic_t        ref;
        atomic_t        nr_busy_cpus;
        int             has_idle_cores;
+
+       /*
+        * Some variables from the most recent sd_lb_stats for this domain,
+        * used by wake_affine().
+        */
+       unsigned long   nr_running;
+       unsigned long   load;
+       unsigned long   capacity;
 };
 struct sched_domain {
...
@@ -430,7 +430,6 @@ static noinline void __ref rest_init(void)
         * The boot idle thread must execute schedule()
         * at least once to get things moving:
         */
-       init_idle_bootup_task(current);
        schedule_preempt_disabled();
        /* Call into cpu_idle with preempt disabled */
        cpu_startup_entry(CPUHP_ONLINE);
...
@@ -2344,13 +2344,7 @@ void cpuset_update_active_cpus(void)
         * We're inside cpu hotplug critical region which usually nests
         * inside cgroup synchronization. Bounce actual hotplug processing
         * to a work item to avoid reverse locking order.
-        *
-        * We still need to do partition_sched_domains() synchronously;
-        * otherwise, the scheduler will get confused and put tasks to the
-        * dead CPU. Fall back to the default single domain.
-        * cpuset_hotplug_workfn() will rebuild it as necessary.
         */
-       partition_sched_domains(1, NULL, NULL);
        schedule_work(&cpuset_hotplug_work);
 }
...
@@ -71,7 +71,6 @@ static inline struct autogroup *autogroup_create(void)
                goto out_fail;
        tg = sched_create_group(&root_task_group);
        if (IS_ERR(tg))
                goto out_free;
@@ -101,7 +100,7 @@ static inline struct autogroup *autogroup_create(void)
 out_fail:
        if (printk_ratelimit()) {
                printk(KERN_WARNING "autogroup_create: %s failure.\n",
-                       ag ? "sched_create_group()" : "kmalloc()");
+                       ag ? "sched_create_group()" : "kzalloc()");
        }
        return autogroup_kref_get(&autogroup_default);
...
@@ -47,6 +47,13 @@ EXPORT_SYMBOL(complete);
  *
  * It may be assumed that this function implies a write memory barrier before
  * changing the task state if and only if any tasks are woken up.
+ *
+ * Since complete_all() sets the completion of @x permanently to done
+ * to allow multiple waiters to finish, a call to reinit_completion()
+ * must be used on @x if @x is to be used again. The code must make
+ * sure that all waiters have woken and finished before reinitializing
+ * @x. Also note that the function completion_done() can not be used
+ * to know if there are still waiters after complete_all() has been called.
  */
 void complete_all(struct completion *x)
 {
@@ -297,6 +304,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
  * Return: 0 if there are waiters (wait_for_completion() in progress)
  *        1 if there are no waiters.
  *
+ * Note, this will always return true if complete_all() was called on @X.
  */
 bool completion_done(struct completion *x)
 {
...
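[Editor's aside] A minimal, hypothetical kernel-style sketch of the reuse rule documented above (my_done, producer(), consumer() and prepare_next_round() are made-up names; only the completion APIs themselves are real):

#include <linux/completion.h>

static DECLARE_COMPLETION(my_done);             /* hypothetical completion object */

static void producer(void)
{
        complete_all(&my_done);                 /* marks @my_done permanently done */
}

static void consumer(void)
{
        wait_for_completion(&my_done);          /* returns immediately once done */
}

static void prepare_next_round(void)
{
        /*
         * Per the documentation: only reinitialize after every waiter is
         * known to have finished; completion_done() cannot be used to
         * check for waiters once complete_all() has run.
         */
        reinit_completion(&my_done);
}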
@@ -5133,24 +5133,17 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
        return retval;
 }
-static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
 void sched_show_task(struct task_struct *p)
 {
        unsigned long free = 0;
        int ppid;
-       unsigned long state = p->state;
-       /* Make sure the string lines up properly with the number of task states: */
-       BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1);
        if (!try_get_task_stack(p))
                return;
-       if (state)
-               state = __ffs(state) + 1;
-       printk(KERN_INFO "%-15.15s %c", p->comm,
-               state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
-       if (state == TASK_RUNNING)
+       printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p));
+       if (p->state == TASK_RUNNING)
                printk(KERN_CONT " running task ");
 #ifdef CONFIG_DEBUG_STACK_USAGE
        free = stack_not_used(p);
@@ -5207,11 +5200,6 @@ void show_state_filter(unsigned long state_filter)
                debug_show_all_locks();
 }
-void init_idle_bootup_task(struct task_struct *idle)
-{
-       idle->sched_class = &idle_sched_class;
-}
 /**
  * init_idle - set up an idle thread for a given CPU
  * @idle: task in question
@@ -5468,7 +5456,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
         */
        next = pick_next_task(rq, &fake_task, rf);
        BUG_ON(!next);
-       next->sched_class->put_prev_task(rq, next);
+       put_prev_task(rq, next);
        /*
         * Rules for changing task_struct::cpus_allowed are holding
...
@@ -119,29 +119,29 @@ static inline int cpudl_maximum(struct cpudl *cp)
  * @p: the task
  * @later_mask: a mask to fill in with the selected CPUs (or NULL)
  *
- * Returns: int - best CPU (heap maximum if suitable)
+ * Returns: int - CPUs were found
  */
 int cpudl_find(struct cpudl *cp, struct task_struct *p,
               struct cpumask *later_mask)
 {
-       int best_cpu = -1;
        const struct sched_dl_entity *dl_se = &p->dl;
        if (later_mask &&
            cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
-               best_cpu = cpumask_any(later_mask);
-               goto out;
-       } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
-                       dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
-               best_cpu = cpudl_maximum(cp);
-               if (later_mask)
-                       cpumask_set_cpu(best_cpu, later_mask);
-       }
-out:
-       WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
-       return best_cpu;
+               return 1;
+       } else {
+               int best_cpu = cpudl_maximum(cp);
+               WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
+               if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) &&
+                   dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
+                       if (later_mask)
+                               cpumask_set_cpu(best_cpu, later_mask);
+                       return 1;
+               }
+       }
+       return 0;
 }
@@ -246,7 +246,6 @@ int cpudl_init(struct cpudl *cp)
 {
        int i;
-       memset(cp, 0, sizeof(*cp));
        raw_spin_lock_init(&cp->lock);
        cp->size = 0;
...
@@ -209,8 +209,6 @@ int cpupri_init(struct cpupri *cp)
 {
        int i;
-       memset(cp, 0, sizeof(*cp));
        for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[i];
...
@@ -1594,7 +1594,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
         * let's hope p can move out.
         */
        if (rq->curr->nr_cpus_allowed == 1 ||
-           cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
+           !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
                return;
        /*
@@ -1602,7 +1602,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
         * see if it is pushed or pulled somewhere else.
         */
        if (p->nr_cpus_allowed != 1 &&
-           cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
+           cpudl_find(&rq->rd->cpudl, p, NULL))
                return;
        resched_curr(rq);
@@ -1655,7 +1655,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
        return rb_entry(left, struct sched_dl_entity, rb_node);
 }
-struct task_struct *
+static struct task_struct *
 pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
        struct sched_dl_entity *dl_se;
@@ -1798,7 +1798,7 @@ static int find_later_rq(struct task_struct *task)
        struct sched_domain *sd;
        struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
        int this_cpu = smp_processor_id();
-       int best_cpu, cpu = task_cpu(task);
+       int cpu = task_cpu(task);
        /* Make sure the mask is initialized first */
        if (unlikely(!later_mask))
@@ -1811,17 +1811,14 @@ static int find_later_rq(struct task_struct *task)
         * We have to consider system topology and task affinity
         * first, then we can look for a suitable cpu.
         */
-       best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
-                       task, later_mask);
-       if (best_cpu == -1)
+       if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
                return -1;
        /*
-        * If we are here, some target has been found,
-        * the most suitable of which is cached in best_cpu.
-        * This is, among the runqueues where the current tasks
-        * have later deadlines than the task's one, the rq
-        * with the latest possible one.
+        * If we are here, some targets have been found, including
+        * the most suitable which is, among the runqueues where the
+        * current tasks have later deadlines than the task's one, the
+        * rq with the latest possible one.
         *
         * Now we check how well this matches with task's
         * affinity and system topology.
@@ -1841,6 +1838,7 @@ static int find_later_rq(struct task_struct *task)
        rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
+                       int best_cpu;
                        /*
                         * If possible, preempting this_cpu is
@@ -1852,12 +1850,15 @@ static int find_later_rq(struct task_struct *task)
                                return this_cpu;
                        }
+                       best_cpu = cpumask_first_and(later_mask,
+                                                    sched_domain_span(sd));
                        /*
-                        * Last chance: if best_cpu is valid and is
-                        * in the mask, that becomes our choice.
+                        * Last chance: if a cpu being in both later_mask
+                        * and current sd span is valid, that becomes our
+                        * choice. Of course, the latest possible cpu is
+                        * already under consideration through later_mask.
                         */
-                       if (best_cpu < nr_cpu_ids &&
-                           cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
+                       if (best_cpu < nr_cpu_ids) {
                                rcu_read_unlock();
                                return best_cpu;
                        }
...
@@ -327,38 +327,78 @@ static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
        return table;
 }
+static cpumask_var_t sd_sysctl_cpus;
 static struct ctl_table_header *sd_sysctl_header;
 void register_sched_domain_sysctl(void)
 {
-       int i, cpu_num = num_possible_cpus();
-       struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+       static struct ctl_table *cpu_entries;
+       static struct ctl_table **cpu_idx;
        char buf[32];
+       int i;
-       WARN_ON(sd_ctl_dir[0].child);
-       sd_ctl_dir[0].child = entry;
-       if (entry == NULL)
-               return;
+       if (!cpu_entries) {
+               cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
+               if (!cpu_entries)
+                       return;
+               WARN_ON(sd_ctl_dir[0].child);
+               sd_ctl_dir[0].child = cpu_entries;
+       }
-       for_each_possible_cpu(i) {
-               snprintf(buf, 32, "cpu%d", i);
-               entry->procname = kstrdup(buf, GFP_KERNEL);
-               entry->mode = 0555;
-               entry->child = sd_alloc_ctl_cpu_table(i);
-               entry++;
-       }
+       if (!cpu_idx) {
+               struct ctl_table *e = cpu_entries;
+               cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
+               if (!cpu_idx)
+                       return;
+               /* deal with sparse possible map */
+               for_each_possible_cpu(i) {
+                       cpu_idx[i] = e;
+                       e++;
+               }
+       }
+       if (!cpumask_available(sd_sysctl_cpus)) {
+               if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
+                       return;
+               /* init to possible to not have holes in @cpu_entries */
+               cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
+       }
+       for_each_cpu(i, sd_sysctl_cpus) {
+               struct ctl_table *e = cpu_idx[i];
+               if (e->child)
+                       sd_free_ctl_entry(&e->child);
+               if (!e->procname) {
+                       snprintf(buf, 32, "cpu%d", i);
+                       e->procname = kstrdup(buf, GFP_KERNEL);
+               }
+               e->mode = 0555;
+               e->child = sd_alloc_ctl_cpu_table(i);
+               __cpumask_clear_cpu(i, sd_sysctl_cpus);
        }
        WARN_ON(sd_sysctl_header);
        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
 }
+void dirty_sched_domain_sysctl(int cpu)
+{
+       if (cpumask_available(sd_sysctl_cpus))
+               __cpumask_set_cpu(cpu, sd_sysctl_cpus);
+}
 /* may be called multiple times per register */
 void unregister_sched_domain_sysctl(void)
 {
        unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
-       if (sd_ctl_dir[0].child)
-               sd_free_ctl_entry(&sd_ctl_dir[0].child);
 }
 #endif /* CONFIG_SYSCTL */
 #endif /* CONFIG_SMP */
@@ -421,13 +461,15 @@ static char *task_group_path(struct task_group *tg)
 }
 #endif
+static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
        if (rq->curr == p)
-               SEQ_printf(m, "R");
+               SEQ_printf(m, ">R");
        else
-               SEQ_printf(m, " ");
+               SEQ_printf(m, " %c", task_state_to_char(p));
        SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
                p->comm, task_pid_nr(p),
@@ -456,9 +498,9 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
        SEQ_printf(m,
        "\nrunnable tasks:\n"
-       " task PID tree-key switches prio"
+       " S task PID tree-key switches prio"
        " wait-time sum-exec sum-sleep\n"
-       "------------------------------------------------------"
+       "-------------------------------------------------------"
        "----------------------------------------------------\n");
        rcu_read_lock();
@@ -872,11 +914,12 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
 #endif
 }
-void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+                         struct seq_file *m)
 {
        unsigned long nr_switches;
-       SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
+       SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
                get_nr_threads(p));
        SEQ_printf(m,
        "---------------------------------------------------------"
...
@@ -1120,11 +1120,15 @@ extern int group_balance_cpu(struct sched_group *sg);
 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
 void register_sched_domain_sysctl(void);
+void dirty_sched_domain_sysctl(int cpu);
 void unregister_sched_domain_sysctl(void);
 #else
 static inline void register_sched_domain_sysctl(void)
 {
 }
+static inline void dirty_sched_domain_sysctl(int cpu)
+{
+}
 static inline void unregister_sched_domain_sysctl(void)
 {
 }
...
@@ -261,8 +261,6 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
 static int init_rootdomain(struct root_domain *rd)
 {
-       memset(rd, 0, sizeof(*rd));
        if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
                goto out;
        if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
@@ -311,7 +309,7 @@ static struct root_domain *alloc_rootdomain(void)
 {
        struct root_domain *rd;
-       rd = kmalloc(sizeof(*rd), GFP_KERNEL);
+       rd = kzalloc(sizeof(*rd), GFP_KERNEL);
        if (!rd)
                return NULL;
@@ -337,7 +335,8 @@ static void free_sched_groups(struct sched_group *sg, int free_sgc)
                if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
                        kfree(sg->sgc);
-               kfree(sg);
+               if (atomic_dec_and_test(&sg->ref))
+                       kfree(sg);
                sg = tmp;
        } while (sg != first);
 }
@@ -345,15 +344,12 @@ static void free_sched_groups(struct sched_group *sg, int free_sgc)
 static void destroy_sched_domain(struct sched_domain *sd)
 {
        /*
-        * If its an overlapping domain it has private groups, iterate and
-        * nuke them all.
+        * A normal sched domain may have multiple group references, an
+        * overlapping domain, having private groups, only one. Iterate,
+        * dropping group/capacity references, freeing where none remain.
         */
-       if (sd->flags & SD_OVERLAP) {
-               free_sched_groups(sd->groups, 1);
-       } else if (atomic_dec_and_test(&sd->groups->ref)) {
-               kfree(sd->groups->sgc);
-               kfree(sd->groups);
-       }
+       free_sched_groups(sd->groups, 1);
        if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
                kfree(sd->shared);
        kfree(sd);
@@ -463,6 +459,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
        rq_attach_root(rq, rd);
        tmp = rq->sd;
        rcu_assign_pointer(rq->sd, sd);
+       dirty_sched_domain_sysctl(cpu);
        destroy_sched_domains(tmp);
        update_top_cache_domain(cpu);
@@ -670,6 +667,7 @@ build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
        else
                cpumask_copy(sg_span, sched_domain_span(sd));
+       atomic_inc(&sg->ref);
        return sg;
 }
@@ -1595,7 +1593,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
        }
 }
-struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
+static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
        const struct cpumask *cpu_map, struct sched_domain_attr *attr,
        struct sched_domain *child, int cpu)
 {
@@ -1854,7 +1852,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
        /* Let the architecture update CPU core mappings: */
        new_topology = arch_update_cpu_topology();
-       n = doms_new ? ndoms_new : 0;
+       if (!doms_new) {
+               WARN_ON_ONCE(dattr_new);
+               n = 0;
+               doms_new = alloc_sched_domains(1);
+               if (doms_new) {
+                       n = 1;
+                       cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
+               }
+       } else {
+               n = ndoms_new;
+       }
        /* Destroy deleted domains: */
        for (i = 0; i < ndoms_cur; i++) {
@@ -1870,11 +1878,10 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
        }
        n = ndoms_cur;
-       if (doms_new == NULL) {
+       if (!doms_new) {
                n = 0;
                doms_new = &fallback_doms;
                cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
-               WARN_ON_ONCE(dattr_new);
        }
        /* Build new domains: */
...