Commit 068ef739 authored by Linus Torvalds

Merge branch 'fixes' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'fixes' of master.kernel.org:/home/rmk/linux-2.6-arm:
  ARM: drop experimental status for ARM_PATCH_PHYS_VIRT
  ARM: 7008/1: alignment: Make SIGBUS sent to userspace POSIXly correct
  ARM: 7007/1: alignment: Prevent ignoring of faults with ARMv6 unaligned access model
  ARM: 7010/1: mm: fix invalid loop for poison_init_mem
  ARM: 7005/1: freshen up mm/proc-arm946.S
  dmaengine: PL08x: Fix trivial build error
  ARM: Fix build error for SMP=n builds
parents a0c49b6b 4eb979d4
@@ -195,8 +195,7 @@ config VECTORS_BASE
           The base address of exception vectors.
 
 config ARM_PATCH_PHYS_VIRT
-        bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)"
-        depends on EXPERIMENTAL
+        bool "Patch physical to virtual translations at runtime"
         depends on !XIP_KERNEL && MMU
         depends on !ARCH_REALVIEW || !SPARSEMEM
         help
...
@@ -323,7 +323,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 #endif
         s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
         if (s && !is_smp())
+#ifdef CONFIG_SMP_ON_UP
                 fixup_smp((void *)s->sh_addr, s->sh_size);
+#else
+                return -EINVAL;
+#endif
         return 0;
 }
...
@@ -22,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 
+#include <asm/system.h>
 #include <asm/unaligned.h>
 
 #include "fault.h"
@@ -95,6 +96,33 @@ static const char *usermode_action[] = {
         "signal+warn"
 };
 
+/* Return true if and only if the ARMv6 unaligned access model is in use. */
+static bool cpu_is_v6_unaligned(void)
+{
+        return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U);
+}
+
+static int safe_usermode(int new_usermode, bool warn)
+{
+        /*
+         * ARMv6 and later CPUs can perform unaligned accesses for
+         * most single load and store instructions up to word size.
+         * LDM, STM, LDRD and STRD still need to be handled.
+         *
+         * Ignoring the alignment fault is not an option on these
+         * CPUs since we spin re-faulting the instruction without
+         * making any progress.
+         */
+        if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
+                new_usermode |= UM_FIXUP;
+
+                if (warn)
+                        printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
+        }
+
+        return new_usermode;
+}
+
 static int alignment_proc_show(struct seq_file *m, void *v)
 {
         seq_printf(m, "User:\t\t%lu\n", ai_user);
@@ -125,7 +153,7 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer
                 if (get_user(mode, buffer))
                         return -EFAULT;
                 if (mode >= '0' && mode <= '5')
-                        ai_usermode = mode - '0';
+                        ai_usermode = safe_usermode(mode - '0', true);
         }
         return count;
 }
@@ -886,9 +914,16 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         if (ai_usermode & UM_FIXUP)
                 goto fixup;
 
-        if (ai_usermode & UM_SIGNAL)
-                force_sig(SIGBUS, current);
-        else {
+        if (ai_usermode & UM_SIGNAL) {
+                siginfo_t si;
+
+                si.si_signo = SIGBUS;
+                si.si_errno = 0;
+                si.si_code = BUS_ADRALN;
+                si.si_addr = (void __user *)addr;
+
+                force_sig_info(si.si_signo, &si, current);
+        } else {
                 /*
                  * We're about to disable the alignment trap and return to
                  * user space. But if an interrupt occurs before actually
@@ -926,20 +961,11 @@ static int __init alignment_init(void)
                 return -ENOMEM;
 #endif
 
-        /*
-         * ARMv6 and later CPUs can perform unaligned accesses for
-         * most single load and store instructions up to word size.
-         * LDM, STM, LDRD and STRD still need to be handled.
-         *
-         * Ignoring the alignment fault is not an option on these
-         * CPUs since we spin re-faulting the instruction without
-         * making any progress.
-         */
-        if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
+        if (cpu_is_v6_unaligned()) {
                 cr_alignment &= ~CR_A;
                 cr_no_alignment &= ~CR_A;
                 set_cr(cr_alignment);
-                ai_usermode = UM_FIXUP;
+                ai_usermode = safe_usermode(ai_usermode, false);
         }
 
         hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
...
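With the do_alignment() change above, a process running with the alignment trap in "signal" mode (e.g. `echo 4 > /proc/cpu/alignment`) now receives a fully populated siginfo: si_code is BUS_ADRALN and si_addr carries the faulting address, rather than the bare force_sig(SIGBUS) it got before. Below is a minimal, hypothetical userspace sketch that observes this; the handler, the deliberately misaligned 64-bit store, and whether that store actually traps (only LDRD/STRD/LDM/STM-class accesses still fault on ARMv6+) are illustrative assumptions, not part of the patch.

```c
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Demo only: printf() is not async-signal-safe, but the handler exits
 * immediately, so it is good enough for a sketch. */
static void bus_handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	printf("SIGBUS: si_code=%d (BUS_ADRALN=%d) si_addr=%p\n",
	       si->si_code, BUS_ADRALN, si->si_addr);
	exit(0);
}

int main(void)
{
	struct sigaction sa;
	static char buf[16];
	volatile uint64_t *p = (uint64_t *)(buf + 1);	/* deliberately misaligned */

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = bus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);

	/* On ARMv6+ a doubleword access typically becomes STRD, which still
	 * raises an alignment fault; in "signal" mode the kernel now delivers
	 * SIGBUS with BUS_ADRALN and the faulting address. */
	*p = 0x1122334455667788ULL;
	printf("no fault delivered for %p (fixup or hardware handled it)\n",
	       (void *)p);
	return 0;
}
```

Run on an ARM target with /proc/cpu/alignment set to 4 or 5; in fixup mode the kernel emulates the access instead of signalling.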
@@ -441,7 +441,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
 static inline void poison_init_mem(void *s, size_t count)
 {
         u32 *p = (u32 *)s;
-        while ((count = count - 4))
+        for (; count != 0; count -= 4)
                 *p++ = 0xe7fddef0;
 }
...
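The poison_init_mem() fix above addresses two problems with the old `while ((count = count - 4))` form: it poisoned one word too few, and when called with count == 0 the size_t subtraction wrapped around, so the loop scribbled the 0xe7fddef0 poison value far past the intended region. The following small, hypothetical userspace harness counts loop iterations instead of writing memory to show the difference; the function names and the iteration cap are inventions for the demo, not kernel code.

```c
#include <stddef.h>
#include <stdio.h>

#define RUNAWAY ((size_t)-1)

/* Old loop shape: the decrement happens before the test, so one word too
 * few is poisoned, and count == 0 wraps to SIZE_MAX - 3 and never
 * terminates (capped here so the demo finishes). */
static size_t words_old(size_t count)
{
	size_t words = 0;

	while ((count = count - 4)) {
		if (++words > 64)
			return RUNAWAY;
	}
	return words;
}

/* New loop shape from the patch: exactly count / 4 words, and a no-op for
 * count == 0 (count is a multiple of 4 for the kernel callers). */
static size_t words_new(size_t count)
{
	size_t words = 0;

	for (; count != 0; count -= 4)
		words++;
	return words;
}

int main(void)
{
	const size_t sizes[] = { 0, 4, 8, 16 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t old = words_old(sizes[i]);

		if (old == RUNAWAY)
			printf("count=%2zu  old=runaway  new=%zu\n",
			       sizes[i], words_new(sizes[i]));
		else
			printf("count=%2zu  old=%zu  new=%zu\n",
			       sizes[i], old, words_new(sizes[i]));
	}
	return 0;
}
```

Expected behaviour: for count 0 the old loop runs away (capped here), and for nonzero counts it writes count/4 - 1 words where the new loop writes count/4.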
@@ -410,6 +410,7 @@ __arm946_proc_info:
         .long   0x41009460
         .long   0xff00fff0
         .long   0
+        .long   0
         b       __arm946_setup
         .long   cpu_arch_name
         .long   cpu_elf_name
@@ -418,6 +419,6 @@ __arm946_proc_info:
         .long   arm946_processor_functions
         .long   0
         .long   0
-        .long   arm940_cache_fns
+        .long   arm946_cache_fns
         .size   __arm946_proc_info, . - __arm946_proc_info
@@ -80,6 +80,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
 #include <linux/dmaengine.h>
 #include <linux/amba/bus.h>
...