Commit f34e3b61 authored by Fenghua Yu, committed by Linus Torvalds

use the new percpu interface for shared data

Currently, most of the per-cpu data that is accessed by different CPUs has a
____cacheline_aligned_in_smp attribute.  Move all of this data to the new
per-cpu shared data section: .data.percpu.shared_aligned.

This separates the percpu data that is frequently referenced by other CPUs
from the local-only percpu data.
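
For reference, DEFINE_PER_CPU_SHARED_ALIGNED comes from the parent commit
(5fb7dc37).  Roughly, it combines the section placement and the cacheline
alignment in one macro; a simplified sketch, not the exact definition in
include/linux/percpu.h:

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                      \
        __attribute__((__section__(".data.percpu.shared_aligned")))    \
        __typeof__(type) per_cpu__##name                                \
        ____cacheline_aligned_in_smp

Variables declared this way keep their cacheline alignment but are grouped
together in their own section, away from the local-only percpu data.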
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5fb7dc37
...@@ -42,5 +42,5 @@ EXPORT_SYMBOL(init_task);
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
...@@ -21,7 +21,7 @@
 #include <asm/apic.h>
 #include <asm/uaccess.h>
-DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
...
...@@ -82,7 +82,7 @@ static volatile struct call_data_struct *call_data;
 #define IPI_KDUMP_CPU_STOP 3
 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
-static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 extern void cpu_halt (void);
...
...@@ -44,7 +44,7 @@ EXPORT_SYMBOL(init_task);
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 /* Copies of the original ist values from the tss are only accessed during
  * debugging, no special alignment required.
...
...@@ -301,7 +301,7 @@ struct rq {
 	struct lock_class_key rq_lock_key;
 };
-static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static DEFINE_MUTEX(sched_hotcpu_mutex);
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
...
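
Note that only the definitions change; callers keep using the usual percpu
accessors.  A minimal, hypothetical illustration (not part of this patch;
assumes <linux/percpu.h> and the declarations changed above are in scope):

static void percpu_shared_example(int cpu)
{
	/* another CPU's TSS: the remote-access case the new section targets */
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* this CPU's irq statistics: accessed exactly as before */
	irq_cpustat_t *stat = &__get_cpu_var(irq_stat);

	(void)tss;
	(void)stat;
}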