Commit 9b8de747 authored by David Howells, committed by Linus Torvalds

FRV: Fix the section attribute on UP DECLARE_PER_CPU()

In non-SMP mode, the variable section attribute specified by DECLARE_PER_CPU()
does not agree with that specified by DEFINE_PER_CPU().  This means that
architectures that have a small data section referenced relative to a base
register may throw up linkage errors due to too great a displacement between
where the base register points and the per-CPU variable.

On FRV, the .h declaration says that the variable is in the .sdata section, but
the .c definition says it's actually in the .data section.  The linker throws
up the following errors:

kernel/built-in.o: In function `release_task':
kernel/exit.c:78: relocation truncated to fit: R_FRV_GPREL12 against symbol `per_cpu__process_counts' defined in .data section in kernel/built-in.o
kernel/exit.c:78: relocation truncated to fit: R_FRV_GPREL12 against symbol `per_cpu__process_counts' defined in .data section in kernel/built-in.o
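
To illustrate the failure mode, here is a minimal, hypothetical sketch (per_cpu__example
is a placeholder, not a real kernel symbol): the compiler sees a small-data declaration
and emits a 12-bit gp-relative (GPREL12) access, but the definition actually lands in
.data, which can be too far from where the base register points:

	/* header: what the FRV DECLARE_PER_CPU() effectively declared */
	extern __attribute__((__section__(".sdata"))) unsigned long per_cpu__example;

	/* .c file: what the generic DEFINE_PER_CPU() actually emitted */
	__attribute__((__section__(".data"))) unsigned long per_cpu__example;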

To fix this, DECLARE_PER_CPU() should simply apply the same section attribute
as does DEFINE_PER_CPU().  However, this is made slightly more complex by
virtue of the fact that there are several variants on DEFINE, so these need to
be matched by variants on DECLARE.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ccc5ff94
@@ -73,6 +73,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 
 #endif /* SMP */
 
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name)
+#include <asm-generic/percpu.h>
 
 #endif /* __ALPHA_PERCPU_H */
@@ -58,7 +58,7 @@ extern struct smp_boot_data {
 extern char no_int_routing __devinitdata;
 
 extern cpumask_t cpu_core_map[NR_CPUS];
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
+DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
 extern int smp_num_siblings;
 
 extern void __iomem *ipi_base_addr;
 extern unsigned char smp_int_redirect;
@@ -37,7 +37,7 @@ extern gate_desc idt_table[];
 struct gdt_page {
 	struct desc_struct gdt[GDT_ENTRIES];
 } __attribute__((aligned(PAGE_SIZE)));
-DECLARE_PER_CPU(struct gdt_page, gdt_page);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
 
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
@@ -26,7 +26,7 @@ typedef struct {
 #endif
 } ____cacheline_aligned irq_cpustat_t;
 
-DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
@@ -138,7 +138,7 @@ extern struct tss_struct doublefault_tss;
 extern __u32 cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
 #define current_cpu_data	__get_cpu_var(cpu_info)
 #else
@@ -270,7 +270,7 @@ struct tss_struct {
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU(struct tss_struct, init_tss);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
 
 /*
  * Save the original ist values for checking stack pointers during debugging
@@ -393,7 +393,7 @@ union irq_stack_union {
 	};
 };
 
-DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
@@ -152,7 +152,7 @@ struct tlb_state {
 	struct mm_struct *active_mm;
 	int state;
 };
-DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
 static inline void reset_lazy_tlbstate(void)
 {
@@ -73,11 +73,50 @@ extern void setup_per_cpu_areas(void);
 
 #endif /* SMP */
 
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data.percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifdef MODULE
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#else
+#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
+#endif
+#define PER_CPU_FIRST_SECTION ".first"
+
+#else
+
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
+
 #ifndef PER_CPU_ATTRIBUTES
 #define PER_CPU_ATTRIBUTES
 #endif
 
-#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
-					__typeof__(type) per_cpu_var(name)
+#define DECLARE_PER_CPU_SECTION(type, name, section)			\
+	extern								\
+	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
+	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+
+#define DECLARE_PER_CPU(type, name)					\
+	DECLARE_PER_CPU_SECTION(type, name, "")
+
+#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
+	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+	____cacheline_aligned_in_smp
+
+#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
+	DECLARE_PER_CPU_SECTION(type, name, ".page_aligned")
+
+#define DECLARE_PER_CPU_FIRST(type, name)				\
+	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
+
 #endif /* _ASM_GENERIC_PERCPU_H_ */
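
For reference, with CONFIG_SMP set and outside a module, the new declaration macros
above expand roughly as follows (foo is a placeholder variable, not part of this patch),
which now mirrors what the DEFINE_PER_CPU*() variants emit:

	/* DECLARE_PER_CPU(int, foo) */
	extern __attribute__((__section__(".data.percpu"))) __typeof__(int) per_cpu__foo;

	/* DECLARE_PER_CPU_SHARED_ALIGNED(int, foo) */
	extern __attribute__((__section__(".data.percpu.shared_aligned")))
		__typeof__(int) per_cpu__foo ____cacheline_aligned_in_smp;

	/* DECLARE_PER_CPU_PAGE_ALIGNED(int, foo) */
	extern __attribute__((__section__(".data.percpu.page_aligned"))) __typeof__(int) per_cpu__foo;
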
@@ -9,30 +9,6 @@
 
 #include <asm/percpu.h>
 
-#ifndef PER_CPU_BASE_SECTION
-#ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data.percpu"
-#else
-#define PER_CPU_BASE_SECTION ".data"
-#endif
-#endif
-
-#ifdef CONFIG_SMP
-
-#ifdef MODULE
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#else
-#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
-#endif
-#define PER_CPU_FIRST_SECTION ".first"
-
-#else
-
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_FIRST_SECTION ""
-
-#endif
-
 #define DEFINE_PER_CPU_SECTION(type, name, section)			\
 	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
 	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
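
On a non-SMP build, which is the configuration the FRV failure hit, PER_CPU_BASE_SECTION
is ".data" and the extra section string is empty, so declaration and definition now agree.
Roughly, assuming DEFINE_PER_CPU() maps to DEFINE_PER_CPU_SECTION(type, name, "") as in
the surrounding file, and again using a placeholder variable:

	/* DECLARE_PER_CPU(int, foo) in a header */
	extern __attribute__((__section__(".data"))) __typeof__(int) per_cpu__foo;

	/* DEFINE_PER_CPU(int, foo) in a .c file */
	__attribute__((__section__(".data"))) __typeof__(int) per_cpu__foo;
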
@@ -638,7 +638,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *,
 void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
 
 /* stats.c */
-DECLARE_PER_CPU(struct rds_statistics, rds_stats);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
 #define rds_stats_inc_which(which, member) do {		\
 	per_cpu(which, get_cpu()).member++;			\
 	put_cpu();						\