Commit 05991bef authored by travis@sgi.com, committed by Ingo Molnar

ia64: use generic percpu

ia64 has a special processor-specific mapping that can be used to locate the
offset of the current per-cpu area.
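
The idea, in a nutshell: every CPU gets its own copy of the per-cpu data, and
an accessor adds that CPU's byte offset to the address of the prototype
variable; ia64 keeps the current CPU's offset somewhere cheap to read, which
is what the new __my_cpu_offset override hands to the generic code. Below is
a minimal user-space sketch of that pattern (the identifiers in it are made
up for illustration and are not kernel symbols):

  /*
   * Minimal user-space model of offset-based per-cpu data; all names
   * here (NCPUS, pcpu_copy, my_offset, this_cpu_var) are illustrative,
   * not kernel identifiers.
   */
  #include <stdio.h>
  #include <stddef.h>

  #define NCPUS 4

  static long per_cpu__counter;        /* "prototype" per-cpu variable */
  static long pcpu_copy[NCPUS];        /* one copy of it per CPU       */
  static ptrdiff_t pcpu_offset[NCPUS]; /* prototype -> copy, in bytes  */

  /*
   * Stand-in for __my_cpu_offset: the current CPU's offset, kept where
   * it is cheap to read (on ia64, a per-cpu variable reached through a
   * pinned processor-specific mapping; here, just a global).
   */
  static ptrdiff_t my_offset;

  /* Stand-in for __get_cpu_var(): prototype address + current offset. */
  #define this_cpu_var(var) \
          (*(long *)((char *)&per_cpu__##var + my_offset))

  int main(void)
  {
          for (int cpu = 0; cpu < NCPUS; cpu++)
                  pcpu_offset[cpu] = (char *)&pcpu_copy[cpu] -
                                     (char *)&per_cpu__counter;

          my_offset = pcpu_offset[2];  /* pretend we run on CPU 2   */
          this_cpu_var(counter) = 42;  /* writes CPU 2's copy only  */

          printf("cpu2 copy = %ld, cpu0 copy = %ld\n",
                 pcpu_copy[2], pcpu_copy[0]);
          return 0;
  }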

Cc: linux-ia64@vger.kernel.org
Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3afc6202
@@ -19,29 +19,14 @@
 # define PER_CPU_ATTRIBUTES	__attribute__((__model__ (__small__)))
 #endif
 
-#define DECLARE_PER_CPU(type, name)				\
-	extern PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-
 #ifdef CONFIG_SMP
 
-extern unsigned long __per_cpu_offset[NR_CPUS];
-#define per_cpu_offset(x) (__per_cpu_offset[x])
-
-/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
-DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
-
-#define per_cpu(var, cpu)  (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
-#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
-#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
-
-extern void setup_per_cpu_areas (void);
+#define __my_cpu_offset	__ia64_per_cpu_var(local_per_cpu_offset)
+
 extern void *per_cpu_init(void);
 
 #else /* ! SMP */
 
-#define per_cpu(var, cpu)		(*((void)(cpu), &per_cpu__##var))
-#define __get_cpu_var(var)		per_cpu__##var
-#define __raw_get_cpu_var(var)		per_cpu__##var
-
 #define per_cpu_init()			(__phys_per_cpu_start)
 
 #endif	/* SMP */
@@ -52,7 +37,12 @@ extern void *per_cpu_init(void);
  * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
  * more efficient.
  */
-#define __ia64_per_cpu_var(var)	(per_cpu__##var)
+#define __ia64_per_cpu_var(var)	per_cpu__##var
+
+#include <asm-generic/percpu.h>
+
+/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
+DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
 
 #endif /* !__ASSEMBLY__ */
@@ -9,10 +9,6 @@
 
 #include <asm/percpu.h>
 
-#ifndef PER_CPU_ATTRIBUTES
-#define PER_CPU_ATTRIBUTES
-#endif
-
 #ifdef CONFIG_SMP
 #define DEFINE_PER_CPU(type, name)				\
 	__attribute__((__section__(".data.percpu")))		\