Commit 42be3f35 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:
 - Relax VDSO alignment requirements so that the kernel-picked one (4K)
   does not conflict with the dynamic linker's one (64K)
 - VDSO gettimeofday fix
 - Barrier fixes for atomic operations and cache flushing
 - TLB invalidation when overriding early page mappings during boot
 - Wire up new 32-bit arm (compat) syscalls
 - Select the correct default LSM_MMAP_MIN_ADDR when COMPAT is enabled
 - defconfig update
 - Clean-up (SERROR typo fix, pgd_alloc simplification)

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: defconfig: Expand default enabled features
  arm64: asm: remove redundant "cc" clobbers
  arm64: atomics: fix use of acquire + release for full barrier semantics
  arm64: barriers: allow dsb macro to take option parameter
  security: select correct default LSM_MMAP_MIN_ADDR on arm and arm64
  arm64: compat: Wire up new AArch32 syscalls
  arm64: vdso: update wtm fields for CLOCK_MONOTONIC_COARSE
  arm64: vdso: fix coarse clock handling
  arm64: simplify pgd_alloc
  arm64: fix typo: s/SERRROR/SERROR/
  arm64: Invalidate the TLB when replacing pmd entries during boot
  arm64: Align CMA sizes to PAGE_SIZE
  arm64: add DSB after icache flush in __flush_icache_all()
  arm64: vdso: prevent ld from aligning PT_LOAD segments to 64k
parents d94d0e27 55834a77
@@ -36,6 +36,7 @@ config ARM64
 select HAVE_GENERIC_DMA_COHERENT
 select HAVE_HW_BREAKPOINT if PERF_EVENTS
 select HAVE_MEMBLOCK
+select HAVE_PATA_PLATFORM
 select HAVE_PERF_EVENTS
 select IRQ_DOMAIN
 select MODULES_USE_ELF_RELA
......
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
@@ -19,6 +18,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -27,6 +27,7 @@ CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_XGENE=y
 CONFIG_SMP=y
 CONFIG_PREEMPT=y
+CONFIG_CMA=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
@@ -42,14 +43,17 @@ CONFIG_IP_PNP_BOOTP=y
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
-CONFIG_BLK_DEV=y
+CONFIG_DMA_CMA=y
 CONFIG_SCSI=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_MII=y
 CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_SERIO_I8042 is not set
@@ -62,13 +66,19 @@ CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_FB=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
......
@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
 " stxr %w1, %w0, %2\n"
 " cbnz %w1, 1b"
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-: "Ir" (i)
-: "cc");
+: "Ir" (i));
 }
 static inline int atomic_add_return(int i, atomic_t *v)
@@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
 int result;
 asm volatile("// atomic_add_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
 " add %w0, %w0, %w3\n"
 " stlxr %w1, %w0, %2\n"
 " cbnz %w1, 1b"
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 : "Ir" (i)
-: "cc", "memory");
+: "memory");
+smp_mb();
 return result;
 }
@@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 " stxr %w1, %w0, %2\n"
 " cbnz %w1, 1b"
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-: "Ir" (i)
-: "cc");
+: "Ir" (i));
 }
 static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 int result;
 asm volatile("// atomic_sub_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
 " sub %w0, %w0, %w3\n"
 " stlxr %w1, %w0, %2\n"
 " cbnz %w1, 1b"
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 : "Ir" (i)
-: "cc", "memory");
+: "memory");
+smp_mb();
 return result;
 }
@@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 unsigned long tmp;
 int oldval;
+smp_mb();
 asm volatile("// atomic_cmpxchg\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
 " cmp %w1, %w3\n"
 " b.ne 2f\n"
-" stlxr %w0, %w4, %2\n"
+" stxr %w0, %w4, %2\n"
 " cbnz %w0, 1b\n"
 "2:"
 : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
 : "Ir" (old), "r" (new)
-: "cc", "memory");
+: "cc");
+smp_mb();
 return oldval;
 }
@@ -173,8 +176,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 " stxr %w1, %0, %2\n"
 " cbnz %w1, 1b"
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-: "Ir" (i)
-: "cc");
+: "Ir" (i));
 }
 static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -183,14 +185,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
 unsigned long tmp;
 asm volatile("// atomic64_add_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
 " add %0, %0, %3\n"
 " stlxr %w1, %0, %2\n"
 " cbnz %w1, 1b"
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 : "Ir" (i)
-: "cc", "memory");
+: "memory");
+smp_mb();
 return result;
 }
@@ -205,8 +208,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 " stxr %w1, %0, %2\n"
 " cbnz %w1, 1b"
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-: "Ir" (i)
-: "cc");
+: "Ir" (i));
 }
 static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -215,14 +217,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 unsigned long tmp;
 asm volatile("// atomic64_sub_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
 " sub %0, %0, %3\n"
 " stlxr %w1, %0, %2\n"
 " cbnz %w1, 1b"
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 : "Ir" (i)
-: "cc", "memory");
+: "memory");
+smp_mb();
 return result;
 }
@@ -231,17 +234,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 long oldval;
 unsigned long res;
+smp_mb();
 asm volatile("// atomic64_cmpxchg\n"
-"1: ldaxr %1, %2\n"
+"1: ldxr %1, %2\n"
 " cmp %1, %3\n"
 " b.ne 2f\n"
-" stlxr %w0, %4, %2\n"
+" stxr %w0, %4, %2\n"
 " cbnz %w0, 1b\n"
 "2:"
 : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
 : "Ir" (old), "r" (new)
-: "cc", "memory");
+: "cc");
+smp_mb();
 return oldval;
 }
@@ -253,11 +259,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 unsigned long tmp;
 asm volatile("// atomic64_dec_if_positive\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
 " subs %0, %0, #1\n"
 " b.mi 2f\n"
 " stlxr %w1, %0, %2\n"
 " cbnz %w1, 1b\n"
+" dmb ish\n"
 "2:"
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 :
......
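Why the atomics above switch from ldaxr/stlxr pairs to plain exclusives plus an explicit barrier: Linux requires value-returning atomics (and cmpxchg) to behave as full memory barriers, i.e. no access after the operation may be observed before any access up to and including it. The acquire half (ldaxr) only orders the load against later accesses, and the release half (stlxr) only against earlier ones, so a store that follows the atomic in program order can still become visible before the store-exclusive. Hence ldxr/stlxr with a trailing dmb ish (smp_mb()), and barriers on both sides of cmpxchg. A rough userspace analogy in C11 atomics (a sketch of the ordering distinction, not the kernel's implementation):

    #include <stdatomic.h>

    atomic_int v;
    atomic_int flag;

    void writer(void)
    {
        /*
         * acq_rel on the RMW is the moral equivalent of ldaxr/stlxr:
         * the relaxed store below may still be reordered before the
         * RMW's store becomes visible to other threads.
         */
        atomic_fetch_add_explicit(&v, 1, memory_order_acq_rel);

        /*
         * seq_cst (or an explicit fence after a relaxed RMW) gives the
         * full-barrier behaviour Linux expects of atomic_add_return().
         */
        atomic_fetch_add_explicit(&v, 1, memory_order_seq_cst);
        atomic_store_explicit(&flag, 1, memory_order_relaxed);
    }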
@@ -25,7 +25,7 @@
 #define wfi() asm volatile("wfi" : : : "memory")
 #define isb() asm volatile("isb" : : : "memory")
-#define dsb() asm volatile("dsb sy" : : : "memory")
+#define dsb(opt) asm volatile("dsb sy" : : : "memory")
 #define mb() dsb()
 #define rmb() asm volatile("dsb ld" : : : "memory")
......
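Note on the dsb(opt) change above: the macro now accepts a DSB option naming the shareability domain and access types to order, so call sites can be written as dsb(ish), dsb(ishst) and so on. As rendered here the expansion still conservatively emits the full-system "dsb sy" whatever option is passed; the option-sensitive form that later kernels use looks roughly like the sketch below (the helper function name is illustrative only):

    /*
     * Sketch of the option-sensitive expansion (later kernels): the
     * option token is pasted into the instruction string.
     */
    #define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

    static inline void publish_pte_update(void)
    {
        /* order earlier page-table stores; inner shareable, stores only */
        dsb(ishst);
    }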
@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
 static inline void __flush_icache_all(void)
 {
 asm("ic ialluis");
+dsb();
 }
 #define flush_dcache_mmap_lock(mapping) \
......
@@ -29,44 +29,45 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 switch (size) {
 case 1:
 asm volatile("// __xchg1\n"
-"1: ldaxrb %w0, %2\n"
+"1: ldxrb %w0, %2\n"
 " stlxrb %w1, %w3, %2\n"
 " cbnz %w1, 1b\n"
 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
 : "r" (x)
-: "cc", "memory");
+: "memory");
 break;
 case 2:
 asm volatile("// __xchg2\n"
-"1: ldaxrh %w0, %2\n"
+"1: ldxrh %w0, %2\n"
 " stlxrh %w1, %w3, %2\n"
 " cbnz %w1, 1b\n"
 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
 : "r" (x)
-: "cc", "memory");
+: "memory");
 break;
 case 4:
 asm volatile("// __xchg4\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
 " stlxr %w1, %w3, %2\n"
 " cbnz %w1, 1b\n"
 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
 : "r" (x)
-: "cc", "memory");
+: "memory");
 break;
 case 8:
 asm volatile("// __xchg8\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
 " stlxr %w1, %3, %2\n"
 " cbnz %w1, 1b\n"
 : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
 : "r" (x)
-: "cc", "memory");
+: "memory");
 break;
 default:
 BUILD_BUG();
 }
+smp_mb();
 return ret;
 }
......
@@ -42,7 +42,7 @@
 #define ESR_EL1_EC_SP_ALIGN (0x26)
 #define ESR_EL1_EC_FP_EXC32 (0x28)
 #define ESR_EL1_EC_FP_EXC64 (0x2C)
-#define ESR_EL1_EC_SERRROR (0x2F)
+#define ESR_EL1_EC_SERROR (0x2F)
 #define ESR_EL1_EC_BREAKPT_EL0 (0x30)
 #define ESR_EL1_EC_BREAKPT_EL1 (0x31)
 #define ESR_EL1_EC_SOFTSTP_EL0 (0x32)
......
@@ -24,10 +24,11 @@
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
 asm volatile( \
-"1: ldaxr %w1, %2\n" \
+"1: ldxr %w1, %2\n" \
 insn "\n" \
 "2: stlxr %w3, %w0, %2\n" \
 " cbnz %w3, 1b\n" \
+" dmb ish\n" \
 "3:\n" \
 " .pushsection .fixup,\"ax\"\n" \
 " .align 2\n" \
@@ -40,7 +41,7 @@
 " .popsection\n" \
 : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
 : "r" (oparg), "Ir" (-EFAULT) \
-: "cc", "memory")
+: "memory")
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -111,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 return -EFAULT;
 asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
 " sub %w3, %w1, %w4\n"
 " cbnz %w3, 3f\n"
 "2: stlxr %w3, %w5, %2\n"
 " cbnz %w3, 1b\n"
+" dmb ish\n"
 "3:\n"
 " .pushsection .fixup,\"ax\"\n"
 "4: mov %w0, %w6\n"
@@ -127,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 " .popsection\n"
 : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
 : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
-: "cc", "memory");
+: "memory");
 *uval = val;
 return ret;
......
@@ -231,7 +231,7 @@
 #define ESR_EL2_EC_SP_ALIGN (0x26)
 #define ESR_EL2_EC_FP_EXC32 (0x28)
 #define ESR_EL2_EC_FP_EXC64 (0x2C)
-#define ESR_EL2_EC_SERRROR (0x2F)
+#define ESR_EL2_EC_SERROR (0x2F)
 #define ESR_EL2_EC_BREAKPT (0x30)
 #define ESR_EL2_EC_BREAKPT_HYP (0x31)
 #define ESR_EL2_EC_SOFTSTP (0x32)
......
@@ -132,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 " cbnz %w0, 2b\n"
 : "=&r" (tmp), "+Q" (rw->lock)
 : "r" (0x80000000)
-: "cc", "memory");
+: "memory");
 }
 static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -146,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 "1:\n"
 : "=&r" (tmp), "+Q" (rw->lock)
 : "r" (0x80000000)
-: "cc", "memory");
+: "memory");
 return !tmp;
 }
@@ -187,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 " cbnz %w1, 2b\n"
 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 :
-: "cc", "memory");
+: "memory");
 }
 static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -201,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 " cbnz %w1, 1b\n"
 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 :
-: "cc", "memory");
+: "memory");
 }
 static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -216,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 "1:\n"
 : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
 :
-: "cc", "memory");
+: "memory");
 return !tmp2;
 }
......
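The clobber-list changes in this file (and in atomic.h, cmpxchg.h and futex.h above) are the "remove redundant cc clobbers" patch: these loops spin on cbnz/cbz, which test a register directly and never touch the condition flags, so declaring "cc" clobbered was unnecessary and only pessimised code generation around the asm. The clobber is kept where a flag-setting instruction such as cmp or subs really is used, as in the cmpxchg routines.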
@@ -399,7 +399,10 @@ __SYSCALL(374, compat_sys_sendmmsg)
 __SYSCALL(375, sys_setns)
 __SYSCALL(376, compat_sys_process_vm_readv)
 __SYSCALL(377, compat_sys_process_vm_writev)
-__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */
+__SYSCALL(378, sys_kcmp)
+__SYSCALL(379, sys_finit_module)
+__SYSCALL(380, sys_sched_setattr)
+__SYSCALL(381, sys_sched_getattr)
-#define __NR_compat_syscalls 379
+#define __NR_compat_syscalls 382
......
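For context, unistd32.h is just a list of __SYSCALL(nr, sym) entries; the arch code defines __SYSCALL before including the header, so each entry expands into one compat syscall table slot. Wiring a new AArch32 syscall therefore means appending an entry and growing the count macro, which must cover the highest wired number plus one (rendered above as 382 for a last entry of 381). A minimal sketch of the table-generation idiom; the exact types and file layout vary between kernel versions:

    /*
     * Sketch: how __SYSCALL entries become table slots (details differ
     * by kernel version; syscall_fn_t is the modern arm64 spelling).
     */
    #define __SYSCALL(nr, sym)	[nr] = (syscall_fn_t)sym,

    static const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
        /* default every slot, then let the list overwrite wired ones */
        [0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
    #include <asm/unistd32.h>
    };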
@@ -38,12 +38,13 @@ __kuser_cmpxchg64: // 0xffff0f60
 .inst 0xe92d00f0 // push {r4, r5, r6, r7}
 .inst 0xe1c040d0 // ldrd r4, r5, [r0]
 .inst 0xe1c160d0 // ldrd r6, r7, [r1]
-.inst 0xe1b20e9f // 1: ldaexd r0, r1, [r2]
+.inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
 .inst 0xe0303004 // eors r3, r0, r4
 .inst 0x00313005 // eoreqs r3, r1, r5
 .inst 0x01a23e96 // stlexdeq r3, r6, [r2]
 .inst 0x03330001 // teqeq r3, #1
 .inst 0x0afffff9 // beq 1b
+.inst 0xf57ff05b // dmb ish
 .inst 0xe2730000 // rsbs r0, r3, #0
 .inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
 .inst 0xe12fff1e // bx lr
@@ -55,11 +56,12 @@ __kuser_memory_barrier: // 0xffff0fa0
 .align 5
 __kuser_cmpxchg: // 0xffff0fc0
-.inst 0xe1923e9f // 1: ldaex r3, [r2]
+.inst 0xe1923f9f // 1: ldrex r3, [r2]
 .inst 0xe0533000 // subs r3, r3, r0
 .inst 0x01823e91 // stlexeq r3, r1, [r2]
 .inst 0x03330001 // teqeq r3, #1
 .inst 0x0afffffa // beq 1b
+.inst 0xf57ff05b // dmb ish
 .inst 0xe2730000 // rsbs r0, r3, #0
 .inst 0xe12fff1e // bx lr
......
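A note on the .inst lines above: the kuser helpers execute in AArch32 (compat) processes, but the kernel is assembled by an AArch64 toolchain, so the 32-bit instructions are emitted as raw opcodes with their disassembly in the comments. The change mirrors the full-barrier fix applied to the native atomics: the acquire-exclusive loads (ldaexd/ldaex) become plain exclusives (ldrexd/ldrex) and 0xf57ff05b, the A32 encoding of dmb ish, is appended after the loop.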
@@ -238,6 +238,8 @@ void update_vsyscall(struct timekeeper *tk)
 vdso_data->use_syscall = use_syscall;
 vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
 vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
 if (!use_syscall) {
 vdso_data->cs_cycle_last = tk->clock->cycle_last;
@@ -245,8 +247,6 @@ void update_vsyscall(struct timekeeper *tk)
 vdso_data->xtime_clock_nsec = tk->xtime_nsec;
 vdso_data->cs_mult = tk->mult;
 vdso_data->cs_shift = tk->shift;
-vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
-vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
 }
 smp_wmb();
......
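The vdso.c hunk is the coarse-clock data fix: CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE are answered from the vdso data page even when use_syscall forces the precise clocks through the real syscall, so the wall-to-monotonic offset must be published unconditionally rather than only inside the !use_syscall branch. What the userspace side effectively computes for the coarse monotonic clock, sketched in C rather than the vdso's assembly:

    /*
     * Sketch of the CLOCK_MONOTONIC_COARSE calculation done in the
     * vdso: coarse wall time plus the wall->monotonic offset, both
     * read from the shared data page under its sequence counter.
     */
    static void coarse_monotonic(const struct vdso_data *vd, struct timespec *ts)
    {
        ts->tv_sec  = vd->xtime_coarse_sec + vd->wtm_clock_sec;
        ts->tv_nsec = vd->xtime_coarse_nsec + vd->wtm_clock_nsec;
        if (ts->tv_nsec >= NSEC_PER_SEC) {	/* normalise the sum */
            ts->tv_sec++;
            ts->tv_nsec -= NSEC_PER_SEC;
        }
    }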
@@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
 # Actual build commands
 quiet_cmd_vdsold = VDSOL $@
-      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
 quiet_cmd_vdsoas = VDSOA $@
       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
......
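The extra -Wl,-n in cmd_vdsold is the PT_LOAD alignment fix from the summary: -n (ld's --nmagic) turns off the default page alignment of sections, so the AArch64 BFD default max-page-size of 64k no longer pads the vdso's PT_LOAD segment and its p_align stops advertising a 64k requirement that the kernel's 4k placement cannot honour. Running readelf -l on the built vdso.so shows the segment alignment dropping accordingly.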
@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
 bl __do_get_tspec
 seqcnt_check w9, 1b
+mov x30, x2
 cmp w0, #CLOCK_MONOTONIC
 b.ne 6f
@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
 ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
 b.ne 8f
+/* xtime_coarse_nsec is already right-shifted */
+mov x12, #0
 /* Get coarse timespec. */
 adr vdso_data, _vdso_data
 3: seqcnt_acquire
@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
 lsr x11, x11, x12
 stp x10, x11, [x1, #TSPEC_TV_SEC]
 mov x0, xzr
-ret x2
+ret
 7:
 mov x30, x2
 8: /* Syscall fallback. */
......
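Two fixes meet in the gettimeofday.S hunks. First, the shared exit used "ret x2", returning through the register in which the precise-clock path had stashed its link register before calling __do_get_tspec; the coarse-clock paths never pass through that code, so they returned through a stale x2. The saved value is now restored into x30 right after the sequence-counter check and the shared exit uses a plain ret, which works for every path. Second, xtime_coarse_nsec is stored already in nanoseconds rather than left-shifted by cs_shift, so the coarse path zeroes x12 to make the shared "lsr x11, x11, x12" a no-op instead of shifting by whatever the register happened to hold.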
@@ -46,11 +46,12 @@ ENTRY( \name )
 mov x2, #1
 add x1, x1, x0, lsr #3 // Get word offset
 lsl x4, x2, x3 // Create mask
-1: ldaxr x2, [x1]
+1: ldxr x2, [x1]
 lsr x0, x2, x3 // Save old value of bit
 \instr x2, x2, x4 // toggle bit
 stlxr w5, x2, [x1]
 cbnz w5, 1b
+dmb ish
 and x0, x0, #1
 3: ret
 ENDPROC(\name )
......
@@ -45,6 +45,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
 if (IS_ENABLED(CONFIG_DMA_CMA)) {
 struct page *page;
+size = PAGE_ALIGN(size);
 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
 get_order(size));
 if (!page)
......
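The one-liner above fixes a truncation: dma_alloc_from_contiguous() takes its size as a count of whole pages, so shifting an unaligned byte count right by PAGE_SHIFT rounded the allocation down. Aligning first makes the page count cover the request; a worked example:

    /*
     * Assume PAGE_SHIFT == 12 (4 KiB pages) and a request of 0x2100
     * bytes (8.25 KiB):
     *
     *   0x2100 >> 12          == 2 pages  -> 256 bytes short
     *   PAGE_ALIGN(0x2100)    == 0x3000
     *   0x3000 >> 12          == 3 pages  -> covers the request
     */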
@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 do {
 next = pmd_addr_end(addr, end);
 /* try section mapping first */
-if (((addr | next | phys) & ~SECTION_MASK) == 0)
+if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+pmd_t old_pmd = *pmd;
 set_pmd(pmd, __pmd(phys | prot_sect_kernel));
-else
+/*
+ * Check for previous table entries created during
+ * boot (__create_page_tables) and flush them.
+ */
+if (!pmd_none(old_pmd))
+flush_tlb_all();
+} else {
 alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+}
 phys += next - addr;
 } while (pmd++, addr = next, addr != end);
 }
......
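The mmu.c hunk addresses stale translations: __create_page_tables installs coarse early mappings, and when alloc_init_pmd later replaces such a live pmd with a section mapping the old entry may still be cached in the TLBs, so the cached walks must be invalidated after the update. flush_tlb_all() is used presumably because this runs once during boot, where a blunt full invalidation is simpler than computing the affected range.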
@@ -32,17 +32,10 @@
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-pgd_t *new_pgd;
 if (PGD_SIZE == PAGE_SIZE)
-new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+return (pgd_t *)get_zeroed_page(GFP_KERNEL);
 else
-new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
-if (!new_pgd)
-return NULL;
-return new_pgd;
+return kzalloc(PGD_SIZE, GFP_KERNEL);
 }
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
......
@@ -103,7 +103,7 @@ config INTEL_TXT
 config LSM_MMAP_MIN_ADDR
 int "Low address space for LSM to protect from user allocation"
 depends on SECURITY && SECURITY_SELINUX
-default 32768 if ARM
+default 32768 if ARM || (ARM64 && COMPAT)
 default 65536
 help
 This is the portion of low virtual memory which should be protected
......