Commit 7e28b271 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Misc fixes"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Fix comment for sched_info_depart
  sched/Documentation: Update sched-design-CFS.txt documentation
  sched/debug: Take PID namespace into account
  sched/fair: Fix small race where child->se.parent,cfs_rq might point to invalid ones
parents 186844b2 13b62e46
@@ -66,9 +66,7 @@ rq->cfs.load value, which is the sum of the weights of the tasks queued on the
 runqueue.
 CFS maintains a time-ordered rbtree, where all runnable tasks are sorted by the
-p->se.vruntime key (there is a subtraction using rq->cfs.min_vruntime to
-account for possible wraparounds). CFS picks the "leftmost" task from this
-tree and sticks to it.
+p->se.vruntime key. CFS picks the "leftmost" task from this tree and sticks to it.
 As the system progresses forwards, the executed tasks are put into the tree
 more and more to the right --- slowly but surely giving a chance for every task
 to become the "leftmost task" and thus get on the CPU within a deterministic
@@ -124,7 +124,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 		SEQ_printf(m, " ");
 	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
-		p->comm, p->pid,
+		p->comm, task_pid_nr(p),
 		SPLIT_NS(p->se.vruntime),
 		(long long)(p->nvcsw + p->nivcsw),
 		p->prio);
@@ -289,7 +289,7 @@ do { \
 	P(nr_load_updates);
 	P(nr_uninterruptible);
 	PN(next_balance);
-	P(curr->pid);
+	SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
 	PN(clock);
 	P(cpu_load[0]);
 	P(cpu_load[1]);
@@ -492,7 +492,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 {
 	unsigned long nr_switches;
-	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
+	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
 		get_nr_threads(p));
 	SEQ_printf(m,
 		"---------------------------------------------------------"
@@ -5928,11 +5928,15 @@ static void task_fork_fair(struct task_struct *p)
 	cfs_rq = task_cfs_rq(current);
 	curr = cfs_rq->curr;
-	if (unlikely(task_cpu(p) != this_cpu)) {
-		rcu_read_lock();
-		__set_task_cpu(p, this_cpu);
-		rcu_read_unlock();
-	}
+	/*
+	 * Not only the cpu but also the task_group of the parent might have
+	 * been changed after parent->se.parent,cfs_rq were copied to
+	 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
+	 * of child point to valid ones.
+	 */
+	rcu_read_lock();
+	__set_task_cpu(p, this_cpu);
+	rcu_read_unlock();
 	update_curr(cfs_rq);
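(Aside, not part of the patch: the new comment relies on what __set_task_cpu() ends up doing through set_task_rq(). The sketch below is paraphrased from memory of kernels of roughly this vintage, not copied from this tree; the point is that the child's group-scheduling pointers are re-derived from its current task_group and CPU, replacing whatever was copied from the parent at fork time.)

/* Paraphrased sketch (not the exact source): set_task_rq(), reached from
 * __set_task_cpu(), rebinds the per-entity pointers to the task's current
 * task_group and CPU, fixing up the values inherited from the parent. */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];	/* group's cfs_rq on this CPU */
	p->se.parent = task_group(p)->se[cpu];		/* group's sched_entity on this CPU */
#endif
}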
@@ -104,8 +104,9 @@ static inline void sched_info_queued(struct task_struct *t)
 }
 /*
- * Called when a process ceases being the active-running process, either
- * voluntarily or involuntarily. Now we can calculate how long we ran.
+ * Called when a process ceases being the active-running process involuntarily
+ * due, typically, to expiring its time slice (this may also be called when
+ * switching to the idle task). Now we can calculate how long we ran.
  * Also, if the process is still in the TASK_RUNNING state, call
  * sched_info_queued() to mark that it has now again started waiting on
  * the runqueue.
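(Aside, not part of the patch: for readers without the file open, the function this comment documents looks roughly as follows. This is a paraphrase from memory, not the exact code in this tree, and the use of rq_clock() and rq_sched_info_depart() here is an assumption about this kernel version.)

static inline void sched_info_depart(struct task_struct *t)
{
	/* How long we ran: now minus the timestamp taken when we got the CPU. */
	unsigned long long delta = rq_clock(task_rq(t)) -
				   t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	/* Still runnable (e.g. preempted or time slice expired)? Then the task
	 * is back to waiting on the runqueue, so restart wait-time accounting. */
	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}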