Commit 6052eb87 authored by Andi Kleen, committed by Greg Kroah-Hartman

kernel/fork.c: allocate idle task for a CPU always on its local node

commit 725fc629 upstream.

Linux preallocates the task structs of the idle tasks for all possible
CPUs.  This currently means they all end up on node 0.  This also
implies that the cache lines used by MWAIT, which sit around the flags
field in the task struct, are all located on node 0.

We see a noticeable performance improvement on Knights Landing CPUs when
the cache lines used for MWAIT are located in the local nodes of the
CPUs using them.  I would expect this to give a (likely slight)
improvement on other systems too.

The patch places each idle task's task struct on the node of its CPU by
passing the right target node to copy_process().
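
In essence, dup_task_struct() gains an explicit node argument: ordinary
fork/clone paths pass NUMA_NO_NODE and keep the old placement policy,
while fork_idle() passes cpu_to_node(cpu).  A condensed sketch of the
resulting allocation logic (simplified from the diff below; the real
function also allocates the thread_info and does much more):

	static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
	{
		/* Ordinary fork/clone callers pass NUMA_NO_NODE and keep the old policy. */
		if (node == NUMA_NO_NODE)
			node = tsk_fork_get_node(orig);

		/* fork_idle() passes cpu_to_node(cpu), so the idle task struct, and with
		 * it the cache line MWAIT monitors near the flags field, lands on that
		 * CPU's local node. */
		return alloc_task_struct_node(node);
	}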

[akpm@linux-foundation.org: use NUMA_NO_NODE, not a bare -1]
Link: http://lkml.kernel.org/r/1463492694-15833-1-git-send-email-andi@firstfloor.org
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 6cc5b73d
kernel/fork.c
@@ -331,13 +331,14 @@ void set_task_stack_end_magic(struct task_struct *tsk)
 	*stackend = STACK_END_MAGIC;	/* for overflow detection */
 }
 
-static struct task_struct *dup_task_struct(struct task_struct *orig)
+static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
-	int node = tsk_fork_get_node(orig);
 	int err;
 
+	if (node == NUMA_NO_NODE)
+		node = tsk_fork_get_node(orig);
 	tsk = alloc_task_struct_node(node);
 	if (!tsk)
 		return NULL;
@@ -1270,7 +1271,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 					int __user *child_tidptr,
 					struct pid *pid,
 					int trace,
-					unsigned long tls)
+					unsigned long tls,
+					int node)
 {
 	int retval;
 	struct task_struct *p;
@@ -1323,7 +1325,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto fork_out;
 
 	retval = -ENOMEM;
-	p = dup_task_struct(current);
+	p = dup_task_struct(current, node);
 	if (!p)
 		goto fork_out;
 
@@ -1699,7 +1701,8 @@ static inline void init_idle_pids(struct pid_link *links)
 struct task_struct *fork_idle(int cpu)
 {
 	struct task_struct *task;
-	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0);
+	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
+			    cpu_to_node(cpu));
 	if (!IS_ERR(task)) {
 		init_idle_pids(task->pids);
 		init_idle(task, cpu);
@@ -1744,7 +1747,7 @@ long _do_fork(unsigned long clone_flags,
 	}
 
 	p = copy_process(clone_flags, stack_start, stack_size,
-			 child_tidptr, NULL, trace, tls);
+			 child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
 	/*
 	 * Do this prior waking up the new thread - the thread pointer
 	 * might get invalid after that point, if the thread exits quickly.