Commit ccc875c1 authored by Arjan van de Ven, committed by Linus Torvalds

[PATCH] Randomisation: stack randomisation

The patch below replaces the existing 8Kb randomisation of the userspace stack
pointer (which is currently only done for Hyperthreaded P-IVs) with a more
general randomisation over a 64Kb range.  64Kb is not a lot, but it's a start
and once the dust settles we can increase this value to a more aggressive
value.
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c518b108
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/kallsyms.h> #include <linux/kallsyms.h>
#include <linux/ptrace.h> #include <linux/ptrace.h>
#include <linux/random.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -828,3 +829,9 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *u_info) ...@@ -828,3 +829,9 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
return 0; return 0;
} }
/*
 * Randomise the initial userspace stack pointer (when the
 * randomize_va_space sysctl is enabled) by subtracting up to
 * 8192 bytes, then round down to a 16-byte boundary as the
 * ABI requires.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	unsigned long offset = 0;

	if (randomize_va_space)
		offset = get_random_int() % 8192;

	return (sp - offset) & ~0xf;
}
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/ptrace.h> #include <linux/ptrace.h>
#include <linux/utsname.h> #include <linux/utsname.h>
#include <linux/random.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -749,3 +750,10 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) ...@@ -749,3 +750,10 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
return 1; return 1;
} }
/*
 * Shuffle the starting user stack pointer downward by a random
 * amount (0..8191 bytes) when VA-space randomisation is enabled,
 * keeping the result 16-byte aligned per the ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	unsigned long slide = 0;

	if (randomize_va_space)
		slide = get_random_int() % 8192;

	return (sp - slide) & ~0xf;
}
...@@ -165,21 +165,14 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec, ...@@ -165,21 +165,14 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
if (k_platform) { if (k_platform) {
size_t len = strlen(k_platform) + 1; size_t len = strlen(k_platform) + 1;
#ifdef CONFIG_X86_HT
/* /*
* In some cases (e.g. Hyper-Threading), we want to avoid L1 * In some cases (e.g. Hyper-Threading), we want to avoid L1
* evictions by the processes running on the same package. One * evictions by the processes running on the same package. One
* thing we can do is to shuffle the initial stack for them. * thing we can do is to shuffle the initial stack for them.
*
* The conditionals here are unneeded, but kept in to make the
* code behaviour the same as pre change unless we have
* hyperthreaded processors. This should be cleaned up
* before 2.6
*/ */
if (smp_num_siblings > 1) p = arch_align_stack(p);
STACK_ALLOC(p, ((current->pid % 64) << 7));
#endif
u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len); u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
if (__copy_to_user(u_platform, k_platform, len)) if (__copy_to_user(u_platform, k_platform, len))
return -EFAULT; return -EFAULT;
......
...@@ -400,7 +400,8 @@ int setup_arg_pages(struct linux_binprm *bprm, ...@@ -400,7 +400,8 @@ int setup_arg_pages(struct linux_binprm *bprm,
while (i < MAX_ARG_PAGES) while (i < MAX_ARG_PAGES)
bprm->page[i++] = NULL; bprm->page[i++] = NULL;
#else #else
stack_base = stack_top - MAX_ARG_PAGES * PAGE_SIZE; stack_base = arch_align_stack(STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE);
stack_base = PAGE_ALIGN(stack_base);
bprm->p += stack_base; bprm->p += stack_base;
mm->arg_start = bprm->p; mm->arg_start = bprm->p;
arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start); arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
......
...@@ -621,4 +621,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) ...@@ -621,4 +621,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#define arch_align_stack(x) (x)
#endif #endif
...@@ -383,6 +383,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size ...@@ -383,6 +383,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#define arch_align_stack(x) (x)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif #endif
...@@ -245,6 +245,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size ...@@ -245,6 +245,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#define arch_align_stack(x) (x)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif #endif
...@@ -69,4 +69,6 @@ extern inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz ...@@ -69,4 +69,6 @@ extern inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
return x; return x;
} }
#define arch_align_stack(x) (x)
#endif #endif
...@@ -123,4 +123,6 @@ do { \ ...@@ -123,4 +123,6 @@ do { \
extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2))); extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2)));
extern void free_initmem(void); extern void free_initmem(void);
#define arch_align_stack(x) (x)
#endif /* _ASM_SYSTEM_H */ #endif /* _ASM_SYSTEM_H */
...@@ -144,4 +144,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz ...@@ -144,4 +144,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
asm("jmp @@0"); \ asm("jmp @@0"); \
}) })
#define arch_align_stack(x) (x)
#endif /* _H8300_SYSTEM_H */ #endif /* _H8300_SYSTEM_H */
...@@ -468,4 +468,6 @@ void enable_hlt(void); ...@@ -468,4 +468,6 @@ void enable_hlt(void);
extern int es7000_plat; extern int es7000_plat;
void cpu_idle_wait(void); void cpu_idle_wait(void);
extern unsigned long arch_align_stack(unsigned long sp);
#endif #endif
...@@ -285,6 +285,9 @@ do { \ ...@@ -285,6 +285,9 @@ do { \
#define ia64_platform_is(x) (strcmp(x, platform_name) == 0) #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
void cpu_idle_wait(void); void cpu_idle_wait(void);
#define arch_align_stack(x) (x)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -294,4 +294,6 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, ...@@ -294,4 +294,6 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
#define set_mb(var, value) do { xchg(&var, value); } while (0) #define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0) #define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define arch_align_stack(x) (x)
#endif /* _ASM_M32R_SYSTEM_H */ #endif /* _ASM_M32R_SYSTEM_H */
...@@ -194,6 +194,8 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old, ...@@ -194,6 +194,8 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
(unsigned long)(n),sizeof(*(ptr)))) (unsigned long)(n),sizeof(*(ptr))))
#endif #endif
#define arch_align_stack(x) (x)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _M68K_SYSTEM_H */ #endif /* _M68K_SYSTEM_H */
...@@ -281,5 +281,6 @@ cmpxchg(volatile int *p, int old, int new) ...@@ -281,5 +281,6 @@ cmpxchg(volatile int *p, int old, int new)
}) })
#endif #endif
#endif #endif
#define arch_align_stack(x) (x)
#endif /* _M68KNOMMU_SYSTEM_H */ #endif /* _M68KNOMMU_SYSTEM_H */
...@@ -433,4 +433,6 @@ do { \ ...@@ -433,4 +433,6 @@ do { \
#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock) #define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock)
#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock)) #define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
#define arch_align_stack(x) (x)
#endif /* _ASM_SYSTEM_H */ #endif /* _ASM_SYSTEM_H */
...@@ -205,4 +205,6 @@ extern spinlock_t pa_tlb_lock; ...@@ -205,4 +205,6 @@ extern spinlock_t pa_tlb_lock;
#endif #endif
#define arch_align_stack(x) (x)
#endif #endif
...@@ -201,5 +201,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) ...@@ -201,5 +201,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
(unsigned long)_n_, sizeof(*(ptr))); \ (unsigned long)_n_, sizeof(*(ptr))); \
}) })
#define arch_align_stack(x) (x)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* __PPC_SYSTEM_H */ #endif /* __PPC_SYSTEM_H */
...@@ -300,5 +300,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) ...@@ -300,5 +300,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
*/ */
#define NET_IP_ALIGN 0 #define NET_IP_ALIGN 0
#define arch_align_stack(x) (x)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif #endif
...@@ -461,6 +461,8 @@ extern void (*_machine_restart)(char *command); ...@@ -461,6 +461,8 @@ extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void); extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void); extern void (*_machine_power_off)(void);
#define arch_align_stack(x) (x)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif #endif
......
...@@ -259,4 +259,6 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int ...@@ -259,4 +259,6 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int
void disable_hlt(void); void disable_hlt(void);
void enable_hlt(void); void enable_hlt(void);
#define arch_align_stack(x) (x)
#endif #endif
...@@ -191,4 +191,6 @@ extern void print_seg(char *file,int line); ...@@ -191,4 +191,6 @@ extern void print_seg(char *file,int line);
#define PL() printk("@ <%s,%s:%d>\n",__FILE__,__FUNCTION__,__LINE__) #define PL() printk("@ <%s,%s:%d>\n",__FILE__,__FUNCTION__,__LINE__)
#define arch_align_stack(x) (x)
#endif /* __ASM_SH64_SYSTEM_H */ #endif /* __ASM_SH64_SYSTEM_H */
...@@ -257,4 +257,6 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret ...@@ -257,4 +257,6 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#define arch_align_stack(x) (x)
#endif /* !(__SPARC_SYSTEM_H) */ #endif /* !(__SPARC_SYSTEM_H) */
...@@ -341,4 +341,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) ...@@ -341,4 +341,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
#endif /* !(__ASSEMBLY__) */ #endif /* !(__ASSEMBLY__) */
#define arch_align_stack(x) (x)
#endif /* !(__SPARC64_SYSTEM_H) */ #endif /* !(__SPARC64_SYSTEM_H) */
...@@ -108,4 +108,6 @@ extern inline unsigned long __xchg (unsigned long with, ...@@ -108,4 +108,6 @@ extern inline unsigned long __xchg (unsigned long with,
return tmp; return tmp;
} }
#define arch_align_stack(x) (x)
#endif /* __V850_SYSTEM_H__ */ #endif /* __V850_SYSTEM_H__ */
...@@ -338,4 +338,6 @@ void enable_hlt(void); ...@@ -338,4 +338,6 @@ void enable_hlt(void);
#define HAVE_EAT_KEY #define HAVE_EAT_KEY
void eat_key(void); void eat_key(void);
extern unsigned long arch_align_stack(unsigned long sp);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment