Commit b581af51 authored by Linus Torvalds

Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86/i386: Put aligned stack-canary in percpu shared_aligned section
  x86/i386: Make sure stack-protector segment base is cache aligned
  x86: Detect stack protector for i386 builds on x86_64
  x86: allow "=rm" in native_save_fl()
  x86: properly annotate alternatives.c
  x86: Introduce GDT_ENTRY_INIT(), initialize bad_bios_desc statically
  x86, 32-bit: Use generic sys_pipe()
  x86: Introduce GDT_ENTRY_INIT(), fix APM
  x86: Introduce GDT_ENTRY_INIT()
  x86: Introduce set_desc_base() and set_desc_limit()
  x86: Remove unused patch_espfix_desc()
  x86: Use get_desc_base()
parents ffaf854b 53f82452
@@ -72,7 +72,7 @@ endif
 ifdef CONFIG_CC_STACKPROTECTOR
 	cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
-	ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y)
+	ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y)
 		stackp-y := -fstack-protector
 		stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all
 		KBUILD_CFLAGS += $(stackp-y)
...
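The extra `$(biarch)` argument matters when a 32-bit kernel is built with a 64-bit toolchain: the stack-protector probe must run in the same mode as the actual build, or it can report support the `-m32` code generator lacks. A simplified sketch of the detection idea (the real script ships in `scripts/`; `$CC` and `$BIARCH` are placeholders for `$(CC)` and `$(biarch)`):

```sh
# Compile a tiny function with -fstack-protector and look for a
# %gs-based canary access in the generated assembly (i386 keeps the
# canary at %gs:20).  Passing $BIARCH (-m32 on a 64-bit compiler)
# makes the probe test the mode the kernel is really built in.
echo "int foo(void) { char X[200]; return 3; }" | \
	$CC $BIARCH -S -x c -O0 -fstack-protector -o - - 2>/dev/null | \
	grep -q "%gs"
```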
@@ -537,7 +537,7 @@ ia32_sys_call_table:
 	.quad sys_mkdir
 	.quad sys_rmdir			/* 40 */
 	.quad sys_dup
-	.quad sys32_pipe
+	.quad sys_pipe
 	.quad compat_sys_times
 	.quad quiet_ni_syscall		/* old prof syscall holder */
 	.quad sys_brk			/* 45 */
...
@@ -189,20 +189,6 @@ asmlinkage long sys32_mprotect(unsigned long start, size_t len,
 	return sys_mprotect(start, len, prot);
 }
 
-asmlinkage long sys32_pipe(int __user *fd)
-{
-	int retval;
-	int fds[2];
-
-	retval = do_pipe_flags(fds, 0);
-	if (retval)
-		goto out;
-	if (copy_to_user(fd, fds, sizeof(fds)))
-		retval = -EFAULT;
-out:
-	return retval;
-}
-
 asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
 				   struct sigaction32 __user *oact,
 				   unsigned int sigsetsize)
...
@@ -73,8 +73,6 @@ static inline void alternatives_smp_module_del(struct module *mod) {}
 static inline void alternatives_smp_switch(int smp) {}
 #endif	/* CONFIG_SMP */
 
-const unsigned char *const *find_nop_table(void);
-
 /* alternative assembly primitive: */
 #define ALTERNATIVE(oldinstr, newinstr, feature)			\
 									\
@@ -144,8 +142,6 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
 #define __parainstructions_end	NULL
 #endif
 
-extern void add_nops(void *insns, unsigned int len);
-
 /*
  * Clear and restore the kernel write-protection flag on the local CPU.
  * Allows the kernel to edit read-only pages.
@@ -161,10 +157,7 @@ extern void add_nops(void *insns, unsigned int len);
  * Intel's errata.
  * On the local CPU you need to be protected again NMI or MCE handlers seeing an
  * inconsistent instruction while you patch.
- * The _early version expects the memory to already be RW.
  */
 extern void *text_poke(void *addr, const void *opcode, size_t len);
-extern void *text_poke_early(void *addr, const void *opcode, size_t len);
 
 #endif /* _ASM_X86_ALTERNATIVE_H */
@@ -291,11 +291,24 @@ static inline unsigned long get_desc_base(const struct desc_struct *desc)
 	return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24);
 }
 
+static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
+{
+	desc->base0 = base & 0xffff;
+	desc->base1 = (base >> 16) & 0xff;
+	desc->base2 = (base >> 24) & 0xff;
+}
+
 static inline unsigned long get_desc_limit(const struct desc_struct *desc)
 {
 	return desc->limit0 | (desc->limit << 16);
 }
 
+static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+{
+	desc->limit0 = limit & 0xffff;
+	desc->limit = (limit >> 16) & 0xf;
+}
+
 static inline void _set_gate(int gate, unsigned type, void *addr,
 			     unsigned dpl, unsigned ist, unsigned seg)
 {
...
@@ -34,6 +34,12 @@ struct desc_struct {
 	};
 } __attribute__((packed));
 
+#define GDT_ENTRY_INIT(flags, base, limit) { { { \
+		.a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \
+		.b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \
+			((limit) & 0xf0000) | ((base) & 0xff000000), \
+	} } }
+
 enum {
 	GATE_INTERRUPT = 0xE,
 	GATE_TRAP = 0xF,
...
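For reference, `GDT_ENTRY_INIT(flags, base, limit)` packs its arguments into the same two 32-bit words (`.a`, `.b`) that the old open-coded initializers spelled out by hand. A stand-alone user-space sketch (not kernel code) checking that equivalence for two descriptors that appear later in this patch:

```c
#include <assert.h>
#include <stdint.h>

/* Same packing as GDT_ENTRY_INIT(), written as a function. */
static void pack(uint16_t flags, uint32_t base, uint32_t limit,
		 uint32_t *a, uint32_t *b)
{
	*a = (limit & 0xffff) | ((base & 0xffff) << 16);
	*b = ((base & 0xff0000) >> 16) | ((flags & 0xf0ff) << 8) |
	     (limit & 0xf0000) | (base & 0xff000000);
}

int main(void)
{
	uint32_t a, b;

	/* i386 kernel code segment: flat 4 GiB, 4 KiB granularity */
	pack(0xc09b, 0, 0xfffff, &a, &b);
	assert(a == 0x0000ffff && b == 0x00cf9b00);

	/* stack-canary segment: 0x18-byte limit, 32-bit data */
	pack(0x4090, 0, 0x18, &a, &b);
	assert(a == 0x00000018 && b == 0x00409000);
	return 0;
}
```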
@@ -13,14 +13,13 @@ static inline unsigned long native_save_fl(void)
 	unsigned long flags;
 
 	/*
-	 * Note: this needs to be "=r" not "=rm", because we have the
-	 * stack offset from what gcc expects at the time the "pop" is
-	 * executed, and so a memory reference with respect to the stack
-	 * would end up using the wrong address.
+	 * "=rm" is safe here, because "pop" adjusts the stack before
+	 * it evaluates its effective address -- this is part of the
+	 * documented behavior of the "pop" instruction.
	 */
 	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
-		     : "=r" (flags)
+		     : "=rm" (flags)
		     : /* no input */
		     : "memory");
...
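The corrected comment is worth unpacking: with `"=rm"`, gcc may pick either a register or a memory slot for `%0`. The memory case is safe because `pop` with a memory destination increments the stack pointer *before* computing the operand's effective address, so even an `%esp`-relative slot resolves correctly. The two shapes this allows (illustrative only):

```c
/*
 *	pushf
 *	pop	%eax		register destination
 *
 *	pushf
 *	pop	8(%esp)		memory destination: %esp has already
 *				been incremented by pop when the
 *				effective address 8(%esp) is formed
 */
```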
@@ -90,8 +90,9 @@ static inline void lguest_set_ts(void)
 }
 
 /* Full 4G segment descriptors, suitable for CS and DS. */
-#define FULL_EXEC_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9b00} } })
-#define FULL_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9300} } })
+#define FULL_EXEC_SEGMENT \
+	((struct desc_struct)GDT_ENTRY_INIT(0xc09b, 0, 0xfffff))
+#define FULL_SEGMENT ((struct desc_struct)GDT_ENTRY_INIT(0xc093, 0, 0xfffff))
 
 #endif /* __ASSEMBLY__ */
...
@@ -403,7 +403,17 @@ extern unsigned long kernel_eflags;
 extern asmlinkage void ignore_sysret(void);
 #else	/* X86_64 */
 #ifdef CONFIG_CC_STACKPROTECTOR
-DECLARE_PER_CPU(unsigned long, stack_canary);
+/*
+ * Make sure stack canary segment base is cache-aligned:
+ *   "For Intel Atom processors, avoid non zero segment base address
+ *    that is not aligned to cache line boundary at all cost."
+ * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
+ */
+struct stack_canary {
+	char __pad[20];		/* canary at %gs:20 */
+	unsigned long canary;
+};
+DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif
 #endif	/* X86_64 */
...
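The 20-byte pad is not arbitrary: gcc's i386 `-fstack-protector` code hard-wires the canary access as `%gs:20` (a toolchain fact assumed here, consistent with the struct comment above). Pointing the per-cpu `%gs` base at the start of this struct therefore lands the fixed offset exactly on `.canary`, which is what lets the old `-20` base adjustment disappear in the stackprotector.h and head_32.S hunks below:

```c
/*
 * %gs base --> struct stack_canary   (cache-line aligned per-cpu data)
 *                 __pad[0..19]       -- offsets  0..19
 *                 canary             -- offset  20
 *
 * gcc's prologue loads
 *	movl %gs:20, %eax
 * which now reads stack_canary.canary directly, since
 * offsetof(struct stack_canary, canary) == 20.
 */
```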
@@ -48,7 +48,7 @@
  * head_32 for boot CPU and setup_per_cpu_areas() for others.
  */
 #define GDT_STACK_CANARY_INIT						\
-	[GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } },
+	[GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
 
 /*
  * Initialize the stackprotector canary value.
@@ -78,21 +78,19 @@ static __always_inline void boot_init_stack_canary(void)
 #ifdef CONFIG_X86_64
 	percpu_write(irq_stack_union.stack_canary, canary);
 #else
-	percpu_write(stack_canary, canary);
+	percpu_write(stack_canary.canary, canary);
 #endif
 }
 
 static inline void setup_stack_canary_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
-	unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
+	unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
 	struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
 	struct desc_struct desc;
 
 	desc = gdt_table[GDT_ENTRY_STACK_CANARY];
-	desc.base0 = canary & 0xffff;
-	desc.base1 = (canary >> 16) & 0xff;
-	desc.base2 = (canary >> 24) & 0xff;
+	set_desc_base(&desc, canary);
 	write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
 #endif
 }
...
@@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	"movl %P[task_canary](%[next]), %%ebx\n\t"			\
 	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
 #define __switch_canary_oparam						\
-	, [stack_canary] "=m" (per_cpu_var(stack_canary))
+	, [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
 #define __switch_canary_iparam						\
 	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
 #else	/* CC_STACKPROTECTOR */
@@ -150,33 +150,6 @@ do {									\
 #endif
 
 #ifdef __KERNEL__
-#define _set_base(addr, base) do { unsigned long __pr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-	"rorl $16,%%edx\n\t" \
-	"movb %%dl,%2\n\t" \
-	"movb %%dh,%3" \
-	:"=&d" (__pr) \
-	:"m" (*((addr)+2)), \
-	 "m" (*((addr)+4)), \
-	 "m" (*((addr)+7)), \
-	 "0" (base) \
-	); } while (0)
-
-#define _set_limit(addr, limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-	"rorl $16,%%edx\n\t" \
-	"movb %2,%%dh\n\t" \
-	"andb $0xf0,%%dh\n\t" \
-	"orb %%dh,%%dl\n\t" \
-	"movb %%dl,%2" \
-	:"=&d" (__lr) \
-	:"m" (*(addr)), \
-	 "m" (*((addr)+6)), \
-	 "0" (limit) \
-	); } while (0)
-
-#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
-#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
-
 extern void native_load_gs_index(unsigned);
...
@@ -81,9 +81,7 @@ extern int panic_on_unrecovered_nmi;
 void math_error(void __user *);
 void math_emulate(struct math_emu_info *);
-#ifdef CONFIG_X86_32
-unsigned long patch_espfix_desc(unsigned long, unsigned long);
-#else
+#ifndef CONFIG_X86_32
 asmlinkage void smp_thermal_interrupt(void);
 asmlinkage void mce_threshold_interrupt(void);
 #endif
...
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/mutex.h>
 #include <linux/list.h>
+#include <linux/stringify.h>
 #include <linux/kprobes.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
@@ -32,7 +33,7 @@ __setup("smp-alt-boot", bootonly);
 #define smp_alt_once 1
 #endif
 
-static int debug_alternative;
+static int __initdata_or_module debug_alternative;
 
 static int __init debug_alt(char *str)
 {
@@ -51,7 +52,7 @@ static int __init setup_noreplace_smp(char *str)
 __setup("noreplace-smp", setup_noreplace_smp);
 
 #ifdef CONFIG_PARAVIRT
-static int noreplace_paravirt = 0;
+static int __initdata_or_module noreplace_paravirt = 0;
 
 static int __init setup_noreplace_paravirt(char *str)
 {
@@ -64,16 +65,17 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt);
 #define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)
 
-#ifdef GENERIC_NOP1
+#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
 /* Use inline assembly to define this because the nops are defined
    as inline assembly strings in the include files and we cannot
    get them easily into strings. */
-asm("\t.section .rodata, \"a\"\nintelnops: "
+asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8
     "\t.previous");
 extern const unsigned char intelnops[];
-static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = {
+static const unsigned char *const __initconst_or_module
+intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
@@ -87,12 +89,13 @@ static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = {
 #endif
 
 #ifdef K8_NOP1
-asm("\t.section .rodata, \"a\"\nk8nops: "
+asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8
     "\t.previous");
 extern const unsigned char k8nops[];
-static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = {
+static const unsigned char *const __initconst_or_module
+k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
@@ -105,13 +108,14 @@ static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = {
 };
 #endif
 
-#ifdef K7_NOP1
-asm("\t.section .rodata, \"a\"\nk7nops: "
+#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
+asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8
     "\t.previous");
 extern const unsigned char k7nops[];
-static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = {
+static const unsigned char *const __initconst_or_module
+k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
@@ -125,12 +129,13 @@ static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = {
 #endif
 
 #ifdef P6_NOP1
-asm("\t.section .rodata, \"a\"\np6nops: "
+asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
	P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
	P6_NOP7 P6_NOP8
     "\t.previous");
 extern const unsigned char p6nops[];
-static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
+static const unsigned char *const __initconst_or_module
+p6_nops[ASM_NOP_MAX+1] = {
	NULL,
	p6nops,
	p6nops + 1,
@@ -146,7 +151,7 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
 #ifdef CONFIG_X86_64
 
 extern char __vsyscall_0;
-const unsigned char *const *find_nop_table(void)
+static const unsigned char *const *__init_or_module find_nop_table(void)
 {
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_has(X86_FEATURE_NOPL))
@@ -157,7 +162,7 @@ const unsigned char *const *find_nop_table(void)
 
 #else /* CONFIG_X86_64 */
 
-const unsigned char *const *find_nop_table(void)
+static const unsigned char *const *__init_or_module find_nop_table(void)
 {
	if (boot_cpu_has(X86_FEATURE_K8))
		return k8_nops;
@@ -172,7 +177,7 @@ const unsigned char *const *find_nop_table(void)
 #endif /* CONFIG_X86_64 */
 
 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
-void add_nops(void *insns, unsigned int len)
+static void __init_or_module add_nops(void *insns, unsigned int len)
 {
	const unsigned char *const *noptable = find_nop_table();
@@ -185,10 +190,10 @@ void add_nops(void *insns, unsigned int len)
		len -= noplen;
	}
 }
-EXPORT_SYMBOL_GPL(add_nops);
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern u8 *__smp_locks[], *__smp_locks_end[];
+static void *text_poke_early(void *addr, const void *opcode, size_t len);
 
 /* Replace instructions with better alternatives for this CPU type.
    This runs before SMP is initialized to avoid SMP problems with
@@ -196,7 +201,8 @@ extern u8 *__smp_locks[], *__smp_locks_end[];
    APs have less capabilities than the boot processor are not handled.
    Tough. Make sure you disable such features by hand. */
 
-void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
+void __init_or_module apply_alternatives(struct alt_instr *start,
+					 struct alt_instr *end)
 {
	struct alt_instr *a;
	char insnbuf[MAX_PATCH_LEN];
@@ -279,7 +285,8 @@ static LIST_HEAD(smp_alt_modules);
 static DEFINE_MUTEX(smp_alt);
 static int smp_mode = 1;	/* protected by smp_alt */
 
-void alternatives_smp_module_add(struct module *mod, char *name,
+void __init_or_module alternatives_smp_module_add(struct module *mod,
+						  char *name,
				 void *locks, void *locks_end,
				 void *text, void *text_end)
 {
@@ -317,7 +324,7 @@ void alternatives_smp_module_add(struct module *mod, char *name,
	mutex_unlock(&smp_alt);
 }
 
-void alternatives_smp_module_del(struct module *mod)
+void __init_or_module alternatives_smp_module_del(struct module *mod)
 {
	struct smp_alt_module *item;
@@ -386,7 +393,7 @@ void alternatives_smp_switch(int smp)
 #endif
 
 #ifdef CONFIG_PARAVIRT
-void apply_paravirt(struct paravirt_patch_site *start,
+void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
 {
	struct paravirt_patch_site *p;
@@ -485,7 +492,8 @@ void __init alternative_instructions(void)
  * instructions. And on the local CPU you need to be protected again NMI or MCE
  * handlers seeing an inconsistent instruction while you patch.
  */
-void *text_poke_early(void *addr, const void *opcode, size_t len)
+static void *__init_or_module text_poke_early(void *addr, const void *opcode,
+					      size_t len)
 {
	unsigned long flags;
	local_irq_save(flags);
...
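The `__stringify(__INITRODATA_OR_MODULE)` idiom above deserves a note: `__stringify()` from `<linux/stringify.h>` expands its argument *first* and only then stringizes it, so whichever section directive the config selects ends up as literal text inside the `asm()` string. A stand-alone sketch of the mechanism (the `main()` harness is illustrative, not kernel code):

```c
#include <stdio.h>

/* Two-level expansion, as in <linux/stringify.h>: the inner macro is
 * fully expanded before # turns the tokens into a string literal. */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

/* Stand-in for __INITRODATA_OR_MODULE in the CONFIG_MODULES=y case. */
#define __INITRODATA_OR_MODULE	.section ".rodata","a",%progbits

int main(void)
{
	/* Prints: .section ".rodata","a",%progbits */
	puts(__stringify(__INITRODATA_OR_MODULE));
	return 0;
}
```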
@@ -403,7 +403,15 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
 static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
 static struct apm_user *user_list;
 static DEFINE_SPINLOCK(user_list_lock);
-static const struct desc_struct	bad_bios_desc = { { { 0, 0x00409200 } } };
+
+/*
+ * Set up a segment that references the real mode segment 0x40
+ * that extends up to the end of page zero (that we have reserved).
+ * This is for buggy BIOS's that refer to (real mode) segment 0x40
+ * even though they are called in protected mode.
+ */
+static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
+			(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
 
 static const char driver_version[] = "1.16ac";	/* no spaces */
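A worked check that this static initializer describes the same segment the removed run-time setup (further down in this patch) used to build:

```c
/*
 * base:  __va(0x400UL)          == __va(0x40 << 4)
 *        (real-mode segment 0x40 is linear address 0x400)
 * limit: PAGE_SIZE - 0x400 - 1  == 4096 - 1024 - 1 == 3071
 *        4095 - (0x40 << 4)     == 4095 - 1024     == 3071   (same)
 * flags: 0x4092 -> D/B=1 (32-bit), byte granularity, and access
 *        byte 0x92 (present, DPL 0, read/write data), matching the
 *        old raw word 0x00409200.
 */
```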
@@ -2331,15 +2339,6 @@ static int __init apm_init(void)
	}
	pm_flags |= PM_APM;
 
-	/*
-	 * Set up a segment that references the real mode segment 0x40
-	 * that extends up to the end of page zero (that we have reserved).
-	 * This is for buggy BIOS's that refer to (real mode) segment 0x40
-	 * even though they are called in protected mode.
-	 */
-	set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
-	_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
-
	/*
	 * Set up the long jump entry point to the APM BIOS, which is called
	 * from inline assembly.
@@ -2358,12 +2357,12 @@ static int __init apm_init(void)
	 * code to that CPU.
	 */
	gdt = get_cpu_gdt_table(0);
-	set_base(gdt[APM_CS >> 3],
-		 __va((unsigned long)apm_info.bios.cseg << 4));
-	set_base(gdt[APM_CS_16 >> 3],
-		 __va((unsigned long)apm_info.bios.cseg_16 << 4));
-	set_base(gdt[APM_DS >> 3],
-		 __va((unsigned long)apm_info.bios.dseg << 4));
+	set_desc_base(&gdt[APM_CS >> 3],
+		      (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
+	set_desc_base(&gdt[APM_CS_16 >> 3],
+		      (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
+	set_desc_base(&gdt[APM_DS >> 3],
+		      (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
 
	proc_create("apm", 0, NULL, &apm_file_ops);
...
@@ -94,45 +94,45 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
-	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
-	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
-	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
-	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
-	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
-	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
+	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
+	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
+	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
+	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
 #else
-	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
-	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
-	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
-	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
+	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
+	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
+	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * They code segments and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
-	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
+	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
-	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
+	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
-	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
+	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
-	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
+	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
-	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
+	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time. All have 64k limits.
	 */
	/* 32-bit code */
-	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
+	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
-	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
+	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
-	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
+	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),
 
-	[GDT_ENTRY_ESPFIX_SS] = { { { 0x0000ffff, 0x00cf9200 } } },
-	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
 #endif
 } };
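To decode the new table by eye: the `flags` argument folds together the access byte (low byte) and the granularity/size nibble (high nibble), while base and limit are passed unencoded. Taking the 32-bit kernel code segment as a worked example:

```c
/*
 * GDT_ENTRY_INIT(0xc09a, 0, 0xfffff):
 *	0xc0 -> G=1 (4 KiB granularity), D/B=1 (32-bit segment)
 *	0x9a -> P=1, DPL=0, S=1, type=0xa (execute/read code)
 *	base 0, limit 0xfffff -> flat 4 GiB segment
 * i.e. exactly the old raw pair { 0x0000ffff, 0x00cf9a00 }.
 */
```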
@@ -1043,7 +1043,7 @@ DEFINE_PER_CPU(struct orig_ist, orig_ist);
 #else	/* CONFIG_X86_64 */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
-DEFINE_PER_CPU(unsigned long, stack_canary);
+DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif
 
 /* Make sure %fs and %gs are initialized properly in idle threads */
...
@@ -27,9 +27,7 @@ static void doublefault_fn(void)
	if (ptr_ok(gdt)) {
		gdt += GDT_ENTRY_TSS << 3;
-		tss = *(u16 *)(gdt+2);
-		tss += *(u8 *)(gdt+4) << 16;
-		tss += *(u8 *)(gdt+7) << 24;
+		tss = get_desc_base((struct desc_struct *)gdt);
		printk(KERN_EMERG "double fault, tss at %08lx\n", tss);
 
		if (ptr_ok(tss)) {
...
@@ -439,7 +439,6 @@ is386:	movl $2,%ecx		# set MP
	jne 1f
	movl $per_cpu__gdt_page,%eax
	movl $per_cpu__stack_canary,%ecx
-	subl $20, %ecx
	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
	shrl $16, %ecx
	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
...
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/ptrace.h>
+#include <asm/desc.h>
 
 unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
 {
@@ -23,7 +24,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
	 * and APM bios ones we just ignore here.
	 */
	if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
-		u32 *desc;
+		struct desc_struct *desc;
		unsigned long base;
 
		seg &= ~7UL;
@@ -33,12 +34,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
			addr = -1L; /* bogus selector, access would fault */
		else {
			desc = child->mm->context.ldt + seg;
-			base = ((desc[0] >> 16) |
-				((desc[1] & 0xff) << 16) |
-				(desc[1] & 0xff000000));
+			base = get_desc_base(desc);
 
			/* 16-bit code segment? */
-			if (!((desc[1] >> 22) & 1))
+			if (!desc->d)
				addr &= 0xffff;
			addr += base;
		}
...
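The bitfield access `desc->d` reads the same bit the old code extracted by hand, and `get_desc_base()` reassembles the same three base fields. The equivalences, with the field layout taken from the kernel's desc_defs.h:

```c
/*
 *	((desc[0] >> 16) | ((desc[1] & 0xff) << 16) |
 *	 (desc[1] & 0xff000000))		== get_desc_base(desc)
 *
 *	(desc[1] >> 22) & 1			== desc->d
 *
 * 'd' is the D/B (default operation size) bit, clear for a 16-bit
 * code segment; it sits at bit 22 of the high word because
 * base1(8) + type(4) + s(1) + dpl(2) + p(1) + limit(4) + avl(1)
 * + l(1) = 22 bits precede it.
 */
```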
@@ -786,27 +786,6 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
-{
-	struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
-	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
-	unsigned long new_kesp = kesp - base;
-	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
-	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
-
-	/* Set up base for espfix segment */
-	desc &= 0x00f0ff0000000000ULL;
-	desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
-		((((__u64)base) << 32) & 0xff00000000000000ULL) |
-		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
-		(lim_pages & 0xffff);
-	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
-
-	return new_kesp;
-}
-#endif
-
 asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
 {
 }
...
@@ -55,12 +55,13 @@ __asm__(".text			\n"
 
 #define Q2_SET_SEL(cpu, selname, address, size) \
 do { \
	struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \
-	set_base(gdt[(selname) >> 3], (u32)(address)); \
-	set_limit(gdt[(selname) >> 3], size); \
+	set_desc_base(&gdt[(selname) >> 3], (u32)(address)); \
+	set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
 } while(0)
 
-static struct desc_struct bad_bios_desc;
+static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
+			(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
 
 /*
  * At some point we want to use this stack frame pointer to unwind
@@ -476,19 +477,15 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
	pnp_bios_callpoint.offset = header->fields.pm16offset;
	pnp_bios_callpoint.segment = PNP_CS16;
 
-	bad_bios_desc.a = 0;
-	bad_bios_desc.b = 0x00409200;
-	set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
-	_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
-
	for_each_possible_cpu(i) {
		struct desc_struct *gdt = get_cpu_gdt_table(i);
		if (!gdt)
			continue;
-		set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], &pnp_bios_callfunc);
-		set_base(gdt[GDT_ENTRY_PNPBIOS_CS16],
-			 __va(header->fields.pm16cseg));
-		set_base(gdt[GDT_ENTRY_PNPBIOS_DS],
-			 __va(header->fields.pm16dseg));
+		set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS32],
+			      (unsigned long)&pnp_bios_callfunc);
+		set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS16],
+			      (unsigned long)__va(header->fields.pm16cseg));
+		set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
+			      (unsigned long)__va(header->fields.pm16dseg));
	}
 }
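One behavioral detail in the Q2_SET_SEL conversion: the old `set_limit()` wrapper subtracted 1 internally (`_set_limit(..., (limit) - 1)`, visible in the macros removed from system.h above), so the conversion moves that subtraction to the call site. A minimal before/after:

```c
/* before: set_limit(gdt[(selname) >> 3], size);
 *         -> _set_limit(..., (size) - 1)       (the -1 was implicit)
 * after:  set_desc_limit(&gdt[(selname) >> 3], (size) - 1);
 *                                              (the -1 is explicit)
 * Segment limits are inclusive, so a segment of `size` bytes has
 * limit `size - 1`.
 */
```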
@@ -81,14 +81,17 @@ extern void setup_per_cpu_areas(void);
 
 #ifdef MODULE
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION ""
 #else
 #define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
 #endif
 #define PER_CPU_FIRST_SECTION ".first"
 
 #else
 
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
 #define PER_CPU_FIRST_SECTION ""
 
 #endif
...
@@ -103,8 +103,8 @@
 #define __INIT		.section	".init.text","ax"
 #define __FINIT		.previous
 
-#define __INITDATA	.section	".init.data","aw"
-#define __INITRODATA	.section	".init.rodata","a"
+#define __INITDATA	.section	".init.data","aw",%progbits
+#define __INITRODATA	.section	".init.rodata","a",%progbits
 #define __FINITDATA	.previous
 
 #define __DEVINIT	.section	".devinit.text", "ax"
@@ -305,9 +305,17 @@ void __init parse_early_options(char *cmdline);
 #ifdef CONFIG_MODULES
 #define __init_or_module
 #define __initdata_or_module
+#define __initconst_or_module
+#define __INIT_OR_MODULE	.text
+#define __INITDATA_OR_MODULE	.data
+#define __INITRODATA_OR_MODULE	.section ".rodata","a",%progbits
 #else
 #define __init_or_module __init
 #define __initdata_or_module __initdata
+#define __initconst_or_module __initconst
+#define __INIT_OR_MODULE __INIT
+#define __INITDATA_OR_MODULE __INITDATA
+#define __INITRODATA_OR_MODULE __INITRODATA
 #endif /*CONFIG_MODULES*/
 
 /* Functions marked as __devexit may be discarded at kernel link time, depending
...
@@ -66,6 +66,14 @@
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp
 
+#define DECLARE_PER_CPU_ALIGNED(type, name)				\
+	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
+	____cacheline_aligned
+
+#define DEFINE_PER_CPU_ALIGNED(type, name)				\
+	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
+	____cacheline_aligned
+
 /*
  * Declaration/definition used for per-CPU variables that must be page aligned.
  */
...