Commit a2b49102 authored by Linus Torvalds

Merge branch 'fixes' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm

* 'fixes' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm:
  ARM: 7099/1: futex: preserve oldval in SMP __futex_atomic_op
  ARM: dma-mapping: free allocated page if unable to map
  ARM: fix vmlinux.lds.S discarding sections
  ARM: nommu: fix warning with checksyscalls.sh
  ARM: 7091/1: errata: D-cache line maintenance operation by MVA may not succeed
parents f9d81f61 df77abca
@@ -1283,6 +1283,20 @@ config ARM_ERRATA_364296
processor into full low interrupt latency mode. ARM11MPCore
is not affected.
+config ARM_ERRATA_764369
+bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
+depends on CPU_V7 && SMP
+help
+This option enables the workaround for erratum 764369
+affecting Cortex-A9 MPCore with two or more processors (all
+current revisions). Under certain timing circumstances, a data
+cache line maintenance operation by MVA targeting an Inner
+Shareable memory region may fail to proceed up to either the
+Point of Coherency or to the Point of Unification of the
+system. This workaround adds a DSB instruction before the
+relevant cache maintenance functions and sets a specific bit
+in the diagnostic control register of the SCU.
endmenu
source "arch/arm/common/Kconfig"
......
@@ -25,17 +25,17 @@
#ifdef CONFIG_SMP
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
smp_mb(); \
__asm__ __volatile__( \
-"1: ldrex %1, [%2]\n" \
+"1: ldrex %1, [%3]\n" \
" " insn "\n" \
-"2: strex %1, %0, [%2]\n" \
-" teq %1, #0\n" \
+"2: strex %2, %0, [%3]\n" \
+" teq %2, #0\n" \
" bne 1b\n" \
" mov %0, #0\n" \
-__futex_atomic_ex_table("%4") \
-: "=&r" (ret), "=&r" (oldval) \
+__futex_atomic_ex_table("%5") \
+: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
@@ -73,14 +73,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#include <linux/preempt.h>
#include <asm/domain.h>
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
__asm__ __volatile__( \
-"1: " T(ldr) " %1, [%2]\n" \
+"1: " T(ldr) " %1, [%3]\n" \
" " insn "\n" \
-"2: " T(str) " %0, [%2]\n" \
+"2: " T(str) " %0, [%3]\n" \
" mov %0, #0\n" \
-__futex_atomic_ex_table("%4") \
-: "=&r" (ret), "=&r" (oldval) \
+__futex_atomic_ex_table("%5") \
+: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
@@ -117,7 +117,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
-int oldval = 0, ret;
+int oldval = 0, ret, tmp;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
@@ -129,19 +129,19 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
switch (op) {
case FUTEX_OP_SET:
-__futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+__futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_ADD:
-__futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+__futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_OR:
-__futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg);
+__futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
-__futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg);
+__futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
-__futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg);
+__futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
default:
ret = -ENOSYS;
......
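Note on the futex.h change above (ARM: 7099/1, "futex: preserve oldval in SMP __futex_atomic_op"): the old SMP macro reused the register that ldrex loaded the old futex word into as the strex status flag, so by the time the retry loop exited, oldval held 0 or 1 rather than the previous value. The extra tmp operand gives the status flag its own register. Below is a minimal user-space C model of that load-exclusive/store-exclusive retry loop, assuming C11 atomics; futex_op_set and futex_word are made-up names for the illustration, not kernel interfaces.

#include <stdatomic.h>
#include <stdio.h>

/* Models FUTEX_OP_SET: atomically store oparg and hand back the previous
 * value.  "status" plays the role of the new tmp/%2 operand: it only ever
 * holds the success/failure flag of the store step, so "old" survives. */
static int futex_op_set(atomic_int *uaddr, int oparg, int *oldval)
{
	int old, status;

	do {
		old = atomic_load(uaddr);                       /* ldrex %1, [%3] */
		status = !atomic_compare_exchange_strong(uaddr, /* strex %2, %0, [%3] */
							 &old, oparg);
	} while (status);                                       /* teq %2, #0 ; bne 1b */

	*oldval = old;  /* with the old macro this would have been the status, 0 or 1 */
	return 0;
}

int main(void)
{
	atomic_int futex_word = 7;
	int oldval;

	futex_op_set(&futex_word, 1, &oldval);
	printf("oldval=%d word=%d\n", oldval, atomic_load(&futex_word)); /* oldval=7 word=1 */
	return 0;
}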
@@ -478,8 +478,8 @@
/*
* Unimplemented (or alternatively implemented) syscalls
*/
-#define __IGNORE_fadvise64_64 1
-#define __IGNORE_migrate_pages 1
+#define __IGNORE_fadvise64_64
+#define __IGNORE_migrate_pages
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_UNISTD_H */
@@ -13,6 +13,7 @@
#include <asm/smp_scu.h>
#include <asm/cacheflush.h>
+#include <asm/cputype.h>
#define SCU_CTRL 0x00
#define SCU_CONFIG 0x04
@@ -37,6 +38,15 @@ void __init scu_enable(void __iomem *scu_base)
{
u32 scu_ctrl;
+#ifdef CONFIG_ARM_ERRATA_764369
+/* Cortex-A9 only */
+if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
+scu_ctrl = __raw_readl(scu_base + 0x30);
+if (!(scu_ctrl & 1))
+__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+}
+#endif
scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
/* already enabled? */
if (scu_ctrl & 1)
......
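Note on the smp_scu.c change above: this is the SCU half of the erratum 764369 workaround that the new Kconfig help text describes. On Cortex-A9 MPCore it sets bit 0 of the register at scu_base + 0x30 (the SCU diagnostic control register) before the SCU is enabled. The stand-alone C sketch below only illustrates how the 0xff0ffff0 / 0x410fc090 MIDR check selects Cortex-A9; the example MIDR values are illustrative, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* The mask keeps the implementer, architecture and primary part number
 * fields of the MIDR and ignores the variant and revision fields, so the
 * compare matches any Cortex-A9 (implementer 0x41 = ARM, part 0xc09). */
static int is_cortex_a9(uint32_t midr)
{
	return (midr & 0xff0ffff0) == 0x410fc090;
}

int main(void)
{
	printf("%d\n", is_cortex_a9(0x412fc092));	/* Cortex-A9 r2p2 -> 1 */
	printf("%d\n", is_cortex_a9(0x410fc051));	/* Cortex-A5 r0p1 -> 0 */
	return 0;
}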
@@ -23,8 +23,10 @@
#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
#define ARM_EXIT_KEEP(x) x
+#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x) x
#endif
OUTPUT_ARCH(arm)
@@ -39,6 +41,11 @@ jiffies = jiffies_64 + 4;
SECTIONS
{
/*
+* XXX: The linker does not define how output sections are
+* assigned to input sections when there are multiple statements
+* matching the same input section name. There is no documented
+* order of matching.
+*
* unwind exit sections must be discarded before the rest of the
* unwind sections get included.
*/
@@ -47,6 +54,9 @@ SECTIONS
*(.ARM.extab.exit.text)
ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
+ARM_EXIT_DISCARD(EXIT_TEXT)
+ARM_EXIT_DISCARD(EXIT_DATA)
+EXIT_CALL
#ifndef CONFIG_HOTPLUG
*(.ARM.exidx.devexit.text)
*(.ARM.extab.devexit.text)
@@ -58,6 +68,8 @@ SECTIONS
#ifndef CONFIG_SMP_ON_UP
*(.alt.smp.init)
#endif
+*(.discard)
+*(.discard.*)
}
#ifdef CONFIG_XIP_KERNEL
@@ -279,9 +291,6 @@ SECTIONS
STABS_DEBUG
.comment 0 : { *(.comment) }
-/* Default discards */
-DISCARDS
}
/*
......
@@ -174,6 +174,10 @@ ENTRY(v7_coherent_user_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r12, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ALT_SMP(W(dsb))
+ALT_UP(W(nop))
+#endif
1:
USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification
add r12, r12, r2
@@ -223,6 +227,10 @@ ENTRY(v7_flush_kern_dcache_area)
add r1, r0, r1
sub r3, r2, #1
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ALT_SMP(W(dsb))
+ALT_UP(W(nop))
+#endif
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
add r0, r0, r2
@@ -247,6 +255,10 @@ v7_dma_inv_range:
sub r3, r2, #1
tst r0, r3
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ALT_SMP(W(dsb))
+ALT_UP(W(nop))
+#endif
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
tst r1, r3
@@ -270,6 +282,10 @@ v7_dma_clean_range:
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ALT_SMP(W(dsb))
+ALT_UP(W(nop))
+#endif
1:
mcr p15, 0, r0, c7, c10, 1 @ clean D / U line
add r0, r0, r2
@@ -288,6 +304,10 @@ ENTRY(v7_dma_flush_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ALT_SMP(W(dsb))
+ALT_UP(W(nop))
+#endif
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
add r0, r0, r2
......
@@ -324,6 +324,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
+else
+__dma_free_buffer(page, size);
return addr;
}
......
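Note on the dma-mapping.c change above ("ARM: dma-mapping: free allocated page if unable to map"): before the fix, __dma_alloc() returned NULL when the mapping step failed but never released the buffer it had just allocated, leaking those pages on every failure. Below is a self-contained toy sketch of the corrected pattern; the toy_* names are made-up stand-ins for the kernel's buffer allocate, remap and free helpers, not the real dma-mapping API.

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the allocate, remap and free steps of __dma_alloc();
 * toy_map always fails so the error path is exercised. */
static void *toy_alloc_buffer(size_t size) { return malloc(size); }
static void toy_free_buffer(void *page) { free(page); }
static void *toy_map(void *page, size_t size) { (void)page; (void)size; return NULL; }

static void *toy_dma_alloc(size_t size)
{
	void *page = toy_alloc_buffer(size);
	if (!page)
		return NULL;

	void *addr = toy_map(page, size);
	if (!addr)
		toy_free_buffer(page);	/* the fix: release the buffer when mapping fails */

	return addr;
}

int main(void)
{
	if (!toy_dma_alloc(4096))
		puts("mapping failed, buffer freed instead of leaked");
	return 0;
}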