Commit 24195cad authored by Russell King

Merge branch 'security-fixes' into fixes

parents 2449189b a5463cd3
arch/arm/Kconfig
@@ -217,7 +217,8 @@ config VECTORS_BASE
 	default DRAM_BASE if REMAP_VECTORS_TO_RAM
 	default 0x00000000
 	help
-	  The base address of exception vectors.
+	  The base address of exception vectors.  This must be two pages
+	  in size.
 
 config ARM_PATCH_PHYS_VIRT
 	bool "Patch physical to virtual translations at runtime" if EMBEDDED
arch/arm/include/asm/elf.h
@@ -130,4 +130,8 @@ struct mm_struct;
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
 
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+int arch_setup_additional_pages(struct linux_binprm *, int);
+
 #endif
arch/arm/include/asm/mmu.h
@@ -10,6 +10,7 @@ typedef struct {
 	int		switch_pending;
 #endif
 	unsigned int	vmalloc_seq;
+	unsigned long	sigpage;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
arch/arm/include/asm/page.h
@@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
 
+#ifdef CONFIG_KUSER_HELPERS
 #define __HAVE_ARCH_GATE_AREA 1
+#endif
 
 #ifdef CONFIG_ARM_LPAE
 #include <asm/pgtable-3level-types.h>
arch/arm/kernel/entry-armv.S
@@ -742,6 +742,18 @@ ENDPROC(__switch_to)
 #endif
 	.endm
 
+	.macro	kuser_pad, sym, size
+	.if	(. - \sym) & 3
+	.rept	4 - (. - \sym) & 3
+	.byte	0
+	.endr
+	.endif
+	.rept	(\size - (. - \sym)) / 4
+	.word	0xe7fddef1
+	.endr
+	.endm
+
+#ifdef CONFIG_KUSER_HELPERS
 	.align	5
 	.globl	__kuser_helper_start
 __kuser_helper_start:
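(Editor's note: the slot arithmetic that kuser_pad maintains can be checked in plain C. The helpers are copied so that they end at 0xffff1000, and the slot addresses below restate the `@ 0x...` comments carried by each helper in this file; this is a sketch for illustration, not part of the commit.)

```c
#include <assert.h>

int main(void)
{
	/* Helpers occupy fixed slots counted back from 0xffff1000;
	 * kuser_pad fills each helper out to its slot size with
	 * 0xe7fddef1 so a stray jump into the padding faults.
	 */
	unsigned cmpxchg64      = 0xffff0f60;	/* kuser_pad ..., 64 */
	unsigned memory_barrier = 0xffff0fa0;	/* kuser_pad ..., 32 */
	unsigned cmpxchg        = 0xffff0fc0;	/* kuser_pad ..., 32 */
	unsigned get_tls        = 0xffff0fe0;	/* kuser_pad ..., 16 */

	assert(cmpxchg64      + 64 == memory_barrier);
	assert(memory_barrier + 32 == cmpxchg);
	assert(cmpxchg        + 32 == get_tls);
	assert(get_tls        + 16 == 0xffff0ff0);  /* software TLS words */
	return 0;
}
```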
@@ -832,18 +844,13 @@ kuser_cmpxchg64_fixup:
 #error "incoherent kernel configuration"
 #endif
 
-	/* pad to next slot */
-	.rept	(16 - (. - __kuser_cmpxchg64)/4)
-	.word	0
-	.endr
-
-	.align	5
+	kuser_pad __kuser_cmpxchg64, 64
 
 __kuser_memory_barrier:				@ 0xffff0fa0
 	smp_dmb	arm
 	usr_ret	lr
 
-	.align	5
+	kuser_pad __kuser_memory_barrier, 32
 
 __kuser_cmpxchg:				@ 0xffff0fc0
@@ -916,13 +923,14 @@ kuser_cmpxchg32_fixup:
 #endif
 
-	.align	5
+	kuser_pad __kuser_cmpxchg, 32
 
 __kuser_get_tls:				@ 0xffff0fe0
 	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
 	usr_ret	lr
 	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
-	.rep	4
+	kuser_pad __kuser_get_tls, 16
+	.rep	3
 	.word	0			@ 0xffff0ff0 software TLS value, then
 	.endr				@ pad up to __kuser_helper_version
@@ -932,14 +940,16 @@ __kuser_helper_version:			@ 0xffff0ffc
 	.globl	__kuser_helper_end
 __kuser_helper_end:
 
+#endif
+
 THUMB(	.thumb	)
 
 /*
  * Vector stubs.
  *
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's.  Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's.  Note that this code must not exceed
+ * a page size.
  *
  * Common stub entry macro:
  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -986,8 +996,17 @@ ENDPROC(vector_\name)
 1:
 	.endm
 
-	.globl	__stubs_start
+	.section .stubs, "ax", %progbits
 __stubs_start:
+	@ This must be the first word
+	.word	vector_swi
+
+vector_rst:
+ ARM(	swi	SYS_ERROR0	)
+ THUMB(	svc	#0		)
+ THUMB(	nop			)
+	b	vector_und
+
 /*
  * Interrupt dispatcher
  */
@@ -1081,6 +1100,16 @@ __stubs_start:
 	.align	5
 
+/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+	b	vector_addrexcptn
+
 /*=============================================================================
  * Undefined FIQs
  *-----------------------------------------------------------------------------
@@ -1094,45 +1123,19 @@ __stubs_start:
 vector_fiq:
 	subs	pc, lr, #4
 
-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
-
-vector_addrexcptn:
-	b	vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
-	.align	5
-.LCvswi:
-	.word	vector_swi
-
-	.globl	__stubs_end
-__stubs_end:
+	.globl	vector_fiq_offset
+	.equ	vector_fiq_offset, vector_fiq
 
-	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
-
-	.globl	__vectors_start
+	.section .vectors, "ax", %progbits
 __vectors_start:
- ARM(	swi	SYS_ERROR0	)
- THUMB(	svc	#0		)
- THUMB(	nop			)
-	W(b)	vector_und + stubs_offset
-	W(ldr)	pc, .LCvswi + stubs_offset
-	W(b)	vector_pabt + stubs_offset
-	W(b)	vector_dabt + stubs_offset
-	W(b)	vector_addrexcptn + stubs_offset
-	W(b)	vector_irq + stubs_offset
-	W(b)	vector_fiq + stubs_offset
+	W(b)	vector_rst
+	W(b)	vector_und
+	W(ldr)	pc, __vectors_start + 0x1000
+	W(b)	vector_pabt
+	W(b)	vector_dabt
+	W(b)	vector_addrexcptn
+	W(b)	vector_irq
+	W(b)	vector_fiq
 
-	.globl	__vectors_end
-__vectors_end:
-
 	.data
arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@
 #include <asm/irq.h>
 #include <asm/traps.h>
 
+#define FIQ_OFFSET ({					\
+		extern void *vector_fiq_offset;		\
+		(unsigned)&vector_fiq_offset;		\
+	})
+
 static unsigned long no_fiq_insn;
 
 /* Default reacquire function
@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec)
 void set_fiq_handler(void *start, unsigned int length)
 {
 #if defined(CONFIG_CPU_USE_DOMAINS)
-	memcpy((void *)0xffff001c, start, length);
+	void *base = (void *)0xffff0000;
 #else
-	memcpy(vectors_page + 0x1c, start, length);
+	void *base = vectors_page;
 #endif
-	flush_icache_range(0xffff001c, 0xffff001c + length);
+	unsigned offset = FIQ_OFFSET;
+
+	memcpy(base + offset, start, length);
+	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
 	if (!vectors_high())
-		flush_icache_range(0x1c, 0x1c + length);
+		flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)
@@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq);
 void __init init_FIQ(int start)
 {
-	no_fiq_insn = *(unsigned long *)0xffff001c;
+	unsigned offset = FIQ_OFFSET;
+	no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
 	fiq_start = start;
 }
arch/arm/kernel/process.c
@@ -429,10 +429,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 }
 
 #ifdef CONFIG_MMU
+#ifdef CONFIG_KUSER_HELPERS
 /*
  * The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Insert it into the
- * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers. Insert it into the gate_vma so that it is visible
+ * through ptrace and /proc/<pid>/mem.
  */
 static struct vm_area_struct gate_vma = {
 	.vm_start	= 0xffff0000,
@@ -461,9 +462,47 @@
 {
 	return in_gate_area(NULL, addr);
 }
+#define is_gate_vma(vma)	((vma) == &gate_vma)
+#else
+#define is_gate_vma(vma)	0
+#endif
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	return (vma == &gate_vma) ? "[vectors]" : NULL;
+	return is_gate_vma(vma) ? "[vectors]" :
+		(vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+		 "[sigpage]" : NULL;
+}
+
+extern struct page *get_signal_page(void);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+	struct mm_struct *mm = current->mm;
+	struct page *page;
+	unsigned long addr;
+	int ret;
+
+	page = get_signal_page();
+	if (!page)
+		return -ENOMEM;
+
+	down_write(&mm->mmap_sem);
+	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+	if (IS_ERR_VALUE(addr)) {
+		ret = addr;
+		goto up_fail;
+	}
+
+	ret = install_special_mapping(mm, addr, PAGE_SIZE,
+		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+		&page);
+
+	if (ret == 0)
+		mm->context.sigpage = addr;
+
+ up_fail:
+	up_write(&mm->mmap_sem);
+	return ret;
 }
 #endif
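(Editor's note: with this change, each process gets a private, randomly placed signal-return page, named "[sigpage]" by arch_vma_name() above. A small userspace sketch — an editor's addition, not part of the commit — that lists both mappings on a kernel carrying these patches:)

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	/* "[sigpage]" moves between runs; "[vectors]" stays at
	 * 0xffff0000 and exists only while the gate VMA does.
	 */
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[sigpage]") || strstr(line, "[vectors]"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
```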
arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
  *  published by the Free Software Foundation.
  */
 #include <linux/errno.h>
+#include <linux/random.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/uaccess.h>
@@ -15,12 +16,11 @@
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
+#include <asm/traps.h>
 #include <asm/ucontext.h>
 #include <asm/unistd.h>
 #include <asm/vfp.h>
 
-#include "signal.h"
-
 /*
  * For ARM syscalls, we encode the syscall number into the instruction.
  */
...@@ -40,11 +40,13 @@ ...@@ -40,11 +40,13 @@
#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
const unsigned long sigreturn_codes[7] = { static const unsigned long sigreturn_codes[7] = {
MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
}; };
static unsigned long signal_return_offset;
#ifdef CONFIG_CRUNCH #ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame) static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{ {
@@ -401,12 +403,15 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 		return 1;
 
 	if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
+		struct mm_struct *mm = current->mm;
+
 		/*
-		 * 32-bit code can use the new high-page
-		 * signal return code support except when the MPU has
-		 * protected the vectors page from PL0
+		 * 32-bit code can use the signal return page
+		 * except when the MPU has protected the vectors
+		 * page from PL0
 		 */
-		retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
+		retcode = mm->context.sigpage + signal_return_offset +
+			  (idx << 2) + thumb;
 	} else {
 		/*
 		 * Ensure that the instruction cache sees
@@ -608,3 +613,36 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 	} while (thread_flags & _TIF_WORK_MASK);
 	return 0;
 }
+
+static struct page *signal_page;
+
+struct page *get_signal_page(void)
+{
+	if (!signal_page) {
+		unsigned long ptr;
+		unsigned offset;
+		void *addr;
+
+		signal_page = alloc_pages(GFP_KERNEL, 0);
+		if (!signal_page)
+			return NULL;
+
+		addr = page_address(signal_page);
+
+		/* Give the signal return code some randomness */
+		offset = 0x200 + (get_random_int() & 0x7fc);
+
+		signal_return_offset = offset;
+
+		/*
+		 * Copy signal return handlers into the signal page, and
+		 * set sigreturn to be a pointer to these.
+		 */
+		memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+
+		ptr = (unsigned long)addr + offset;
+		flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+	}
+
+	return signal_page;
+}
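(Editor's note: the randomization arithmetic is worth checking. `get_random_int() & 0x7fc` yields a word-aligned value in [0, 0x7fc], so the 28-byte sigreturn_codes array always lands word-aligned in [0x200, 0x9fc] and ends well inside the page. A quick standalone check that enumerates every possible offset:)

```c
#include <assert.h>

int main(void)
{
	unsigned r;

	for (r = 0; r <= 0xfff; r++) {
		unsigned offset = 0x200 + (r & 0x7fc);

		assert((offset & 3) == 0);	/* word aligned */
		assert(offset >= 0x200);	/* randomization floor */
		/* 7 words of sigreturn code (28 bytes on ARM) fit in one page */
		assert(offset + 7 * 4 <= 0x1000);
	}
	return 0;
}
```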
arch/arm/kernel/signal.h (deleted by this commit)
@@ -1,12 +0,0 @@
-/*
- *  linux/arch/arm/kernel/signal.h
- *
- *  Copyright (C) 2005-2009 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define KERN_SIGRETURN_CODE	(CONFIG_VECTORS_BASE + 0x00000500)
-
-extern const unsigned long sigreturn_codes[7];
arch/arm/kernel/traps.c
@@ -35,8 +35,6 @@
 #include <asm/tls.h>
 #include <asm/system_misc.h>
 
-#include "signal.h"
-
 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
 
 void *vectors_page;
@@ -800,15 +798,26 @@ void __init trap_init(void)
 	return;
 }
 
-static void __init kuser_get_tls_init(unsigned long vectors)
+#ifdef CONFIG_KUSER_HELPERS
+static void __init kuser_init(void *vectors)
 {
+	extern char __kuser_helper_start[], __kuser_helper_end[];
+	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+
+	memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
 	/*
 	 * vectors + 0xfe0 = __kuser_get_tls
 	 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
 	 */
 	if (tls_emu || has_tls_reg)
-		memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+		memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
 }
+#else
+static void __init kuser_init(void *vectors)
+{
+}
+#endif
 
 void __init early_trap_init(void *vectors_base)
 {
@@ -816,33 +825,30 @@ void __init early_trap_init(void *vectors_base)
 	unsigned long vectors = (unsigned long)vectors_base;
 	extern char __stubs_start[], __stubs_end[];
 	extern char __vectors_start[], __vectors_end[];
-	extern char __kuser_helper_start[], __kuser_helper_end[];
-	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+	unsigned i;
 
 	vectors_page = vectors_base;
 
 	/*
+	 * Poison the vectors page with an undefined instruction.  This
+	 * instruction is chosen to be undefined for both ARM and Thumb
+	 * ISAs.  The Thumb version is an undefined instruction with a
+	 * branch back to the undefined instruction.
+	 */
+	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+		((u32 *)vectors_base)[i] = 0xe7fddef1;
+
+	/*
 	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
 	 * into the vector page, mapped at 0xffff0000, and ensure these
 	 * are visible to the instruction stream.
 	 */
 	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
-	memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
-	memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+	memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
 
-	/*
-	 * Do processor specific fixups for the kuser helpers
-	 */
-	kuser_get_tls_init(vectors);
+	kuser_init(vectors_base);
 
-	/*
-	 * Copy signal return handlers into the vector page, and
-	 * set sigreturn to be a pointer to these.
-	 */
-	memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
-	       sigreturn_codes, sizeof(sigreturn_codes));
-
-	flush_icache_range(vectors, vectors + PAGE_SIZE);
+	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
 	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
 	/*
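(Editor's note: why 0xe7fddef1 works as a poison value for both instruction sets, per the comment in the hunk above. Decoded as ARM, it falls in the architecturally undefined space; fetched as a little-endian Thumb stream it reads as 0xdef1, an undefined instruction, followed by 0xe7fd, a branch back to it. A small sketch of the halfword split — it assumes a little-endian host, matching the instruction stream on these kernels:)

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t poison = 0xe7fddef1;
	uint16_t half[2];

	/* split the word as a little-endian Thumb fetch would see it */
	memcpy(half, &poison, sizeof(poison));
	assert(half[0] == 0xdef1);	/* Thumb: undefined instruction */
	assert(half[1] == 0xe7fd);	/* Thumb: branch back to it */
	return 0;
}
```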
arch/arm/kernel/vmlinux.lds.S
@@ -148,6 +148,23 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__init_begin = .;
 #endif
+	/*
+	 * The vectors and stubs are relocatable code, and the
+	 * only thing that matters is their relative offsets
+	 */
+	__vectors_start = .;
+	.vectors 0 : AT(__vectors_start) {
+		*(.vectors)
+	}
+	. = __vectors_start + SIZEOF(.vectors);
+	__vectors_end = .;
+
+	__stubs_start = .;
+	.stubs 0x1000 : AT(__stubs_start) {
+		*(.stubs)
+	}
+	. = __stubs_start + SIZEOF(.stubs);
+	__stubs_end = .;
 
 	INIT_TEXT_SECTION(8)
 	.exit.text : {
arch/arm/mm/Kconfig
@@ -421,24 +421,28 @@ config CPU_32v3
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4T
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v5
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v6
 	bool
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE
 config TLS_REG_EMUL
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  An SMP system using a pre-ARMv6 processor (there are apparently
 	  a few prototypes like that in existence) and therefore access to
@@ -783,11 +788,40 @@ config TLS_REG_EMUL
 config NEEDS_SYSCALL_FOR_CMPXCHG
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  SMP on a pre-ARMv6 processor?  Well OK then.
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config NEED_KUSER_HELPERS
+	bool
+
+config KUSER_HELPERS
+	bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+	default y
+	help
+	  Warning: disabling this option may break user programs.
+
+	  Provide kuser helpers in the vector page.  The kernel provides
+	  helper code to userspace in read only form at a fixed location
+	  in the high vector page to allow userspace to be independent of
+	  the CPU type fitted to the system.  This permits binaries to be
+	  run on ARMv4 through to ARMv7 without modification.
+
+	  However, the fixed address nature of these helpers can be used
+	  by ROP (return orientated programming) authors when creating
+	  exploits.
+
+	  If all of the binaries and libraries which run on your platform
+	  are built specifically for your platform, and make no use of
+	  these helpers, then you can turn this option off.  However,
+	  when such a binary or library is run, it will receive a SIGILL
+	  signal, which will terminate the program.
+
+	  Say N here only if you are absolutely certain that you do not
+	  need these helpers; otherwise, the safe option is to say Y.
+
 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
 	depends on CPU_V6K && SMP
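(Editor's note: for context on what this option removes, userspace reaches the helpers at fixed addresses published in Documentation/arm/kernel_user_helpers.txt. A hedged sketch of the classic cmpxchg-based atomic add, in the style of that document; with KUSER_HELPERS=n these calls land on the poisoned page and the process gets SIGILL:)

```c
typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

/* returns the new value, retrying until the cmpxchg succeeds */
int atomic_add(volatile int *ptr, int val)
{
	int old, new;

	do {
		old = *ptr;
		new = old + val;
	} while (__kuser_cmpxchg(old, new, ptr));	/* 0 on success */

	return new;
}
```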
arch/arm/mm/mmu.c
@@ -1195,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors = early_alloc(PAGE_SIZE * 2);
 
 	early_trap_init(vectors);
@@ -1240,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
+#ifdef CONFIG_KUSER_HELPERS
 	map.type = MT_HIGH_VECTORS;
+#else
+	map.type = MT_LOW_VECTORS;
+#endif
 	create_mapping(&map);
 
 	if (!vectors_high()) {
 		map.virtual = 0;
+		map.length = PAGE_SIZE * 2;
 		map.type = MT_LOW_VECTORS;
 		create_mapping(&map);
 	}
 
+	/* Now create a kernel read-only mapping */
+	map.pfn += 1;
+	map.virtual = 0xffff0000 + PAGE_SIZE;
+	map.length = PAGE_SIZE;
+	map.type = MT_LOW_VECTORS;
+	create_mapping(&map);
+
 	/*
 	 * Ask the machine support to map in the statically mapped devices.
 	 */