Commit d60ddd24 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - decompressor updates

 - prevention of out-of-bounds access while stacktracing

 - fix a section mismatch warning with free_memmap()

 - make kexec depend on MMU to avoid some build errors

 - remove swapops stubs

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8954/1: NOMMU: remove stubs for swapops
  ARM: 8952/1: Disable kmemleak on XIP kernels
  ARM: 8951/1: Fix Kexec compilation issue.
  ARM: 8949/1: mm: mark free_memmap as __init
  ARM: 8948/1: Prevent OOB access in stacktrace
  ARM: 8945/1: decompressor: use CONFIG option instead of cc-option
  ARM: 8942/1: Revert "8857/1: efi: enable CP15 DMB instructions before cleaning the cache"
  ARM: 8941/1: decompressor: enable CP15 barrier instructions in v7 cache setup code
parents 71c3a888 03a575a6
...@@ -74,7 +74,7 @@ config ARM ...@@ -74,7 +74,7 @@ config ARM
select HAVE_CONTEXT_TRACKING select HAVE_CONTEXT_TRACKING
select HAVE_COPY_THREAD_TLS select HAVE_COPY_THREAD_TLS
select HAVE_C_RECORDMCOUNT select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL
select HAVE_DMA_CONTIGUOUS if MMU select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
...@@ -1905,7 +1905,7 @@ config XIP_DEFLATED_DATA ...@@ -1905,7 +1905,7 @@ config XIP_DEFLATED_DATA
config KEXEC config KEXEC
bool "Kexec system call (EXPERIMENTAL)" bool "Kexec system call (EXPERIMENTAL)"
depends on (!SMP || PM_SLEEP_SMP) depends on (!SMP || PM_SLEEP_SMP)
depends on !CPU_V7M depends on MMU
select KEXEC_CORE select KEXEC_CORE
help help
kexec is a system call that implements the ability to shutdown your kexec is a system call that implements the ability to shutdown your
......
...@@ -110,12 +110,12 @@ endif ...@@ -110,12 +110,12 @@ endif
# -fstack-protector-strong triggers protection checks in this code, # -fstack-protector-strong triggers protection checks in this code,
# but it is being used too early to link to meaningful stack_chk logic. # but it is being used too early to link to meaningful stack_chk logic.
nossp_flags := $(call cc-option, -fno-stack-protector) nossp-flags-$(CONFIG_CC_HAS_STACKPROTECTOR_NONE) := -fno-stack-protector
CFLAGS_atags_to_fdt.o := $(nossp_flags) CFLAGS_atags_to_fdt.o := $(nossp-flags-y)
CFLAGS_fdt.o := $(nossp_flags) CFLAGS_fdt.o := $(nossp-flags-y)
CFLAGS_fdt_ro.o := $(nossp_flags) CFLAGS_fdt_ro.o := $(nossp-flags-y)
CFLAGS_fdt_rw.o := $(nossp_flags) CFLAGS_fdt_rw.o := $(nossp-flags-y)
CFLAGS_fdt_wip.o := $(nossp_flags) CFLAGS_fdt_wip.o := $(nossp-flags-y)
ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj) ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
asflags-y := -DZIMAGE asflags-y := -DZIMAGE
......
...@@ -140,6 +140,17 @@ ...@@ -140,6 +140,17 @@
#endif #endif
.endm .endm
@ enable_cp15_barriers - ensure SCTLR.CP15BEN (bit 5) is set so that the
@ legacy CP15 barrier encodings (used by the cache maintenance code in
@ this file, e.g. __armv7_mmu_cache_on/__armv7_mmu_cache_flush) are
@ available.  Firmware (e.g. UEFI) may hand over with the bit clear.
@ This bit is RAO/WI on v6 and earlier, so the ISB on the enable path
@ is only ever executed on v7+.
@ \reg: scratch register, clobbered.  Flags are clobbered by the tst.
.macro enable_cp15_barriers, reg
mrc p15, 0, \reg, c1, c0, 0 @ read SCTLR
tst \reg, #(1 << 5) @ CP15BEN bit set?
bne .L_\@ @ already enabled: nothing to do
orr \reg, \reg, #(1 << 5) @ CP15 barrier instructions
mcr p15, 0, \reg, c1, c0, 0 @ write SCTLR
ARM( .inst 0xf57ff06f @ v7+ isb )
THUMB( isb )
.L_\@:
.endm
.section ".start", "ax" .section ".start", "ax"
/* /*
* sort out different calling conventions * sort out different calling conventions
...@@ -820,6 +831,7 @@ __armv4_mmu_cache_on: ...@@ -820,6 +831,7 @@ __armv4_mmu_cache_on:
mov pc, r12 mov pc, r12
__armv7_mmu_cache_on: __armv7_mmu_cache_on:
enable_cp15_barriers r11
mov r12, lr mov r12, lr
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0 mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
...@@ -1209,6 +1221,7 @@ __armv6_mmu_cache_flush: ...@@ -1209,6 +1221,7 @@ __armv6_mmu_cache_flush:
mov pc, lr mov pc, lr
__armv7_mmu_cache_flush: __armv7_mmu_cache_flush:
enable_cp15_barriers r10
tst r4, #1 tst r4, #1
bne iflush bne iflush
mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1 mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1
...@@ -1447,21 +1460,7 @@ ENTRY(efi_stub_entry) ...@@ -1447,21 +1460,7 @@ ENTRY(efi_stub_entry)
@ Preserve return value of efi_entry() in r4 @ Preserve return value of efi_entry() in r4
mov r4, r0 mov r4, r0
bl cache_clean_flush
@ our cache maintenance code relies on CP15 barrier instructions
@ but since we arrived here with the MMU and caches configured
@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
@ the enable path will be executed on v7+ only.
mrc p15, 0, r1, c1, c0, 0 @ read SCTLR
tst r1, #(1 << 5) @ CP15BEN bit set?
bne 0f
orr r1, r1, #(1 << 5) @ CP15 barrier instructions
mcr p15, 0, r1, c1, c0, 0 @ write SCTLR
ARM( .inst 0xf57ff06f @ v7+ isb )
THUMB( isb )
0: bl cache_clean_flush
bl cache_off bl cache_off
@ Set parameters for booting zImage according to boot protocol @ Set parameters for booting zImage according to boot protocol
......
...@@ -42,12 +42,6 @@ ...@@ -42,12 +42,6 @@
#define swapper_pg_dir ((pgd_t *) 0) #define swapper_pg_dir ((pgd_t *) 0)
#define __swp_type(x) (0)
#define __swp_offset(x) (0)
#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
typedef pte_t *pte_addr_t; typedef pte_t *pte_addr_t;
......
...@@ -92,6 +92,8 @@ static int save_trace(struct stackframe *frame, void *d) ...@@ -92,6 +92,8 @@ static int save_trace(struct stackframe *frame, void *d)
return 0; return 0;
regs = (struct pt_regs *)frame->sp; regs = (struct pt_regs *)frame->sp;
if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
return 0;
trace->entries[trace->nr_entries++] = regs->ARM_pc; trace->entries[trace->nr_entries++] = regs->ARM_pc;
......
...@@ -64,14 +64,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long); ...@@ -64,14 +64,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{ {
unsigned long end = frame + 4 + sizeof(struct pt_regs);
#ifdef CONFIG_KALLSYMS #ifdef CONFIG_KALLSYMS
printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else #else
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif #endif
if (in_entry_text(from)) if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); dump_mem("", "Exception stack", frame + 4, end);
} }
void dump_backtrace_stm(u32 *stack, u32 instruction) void dump_backtrace_stm(u32 *stack, u32 instruction)
......
...@@ -324,7 +324,7 @@ static inline void poison_init_mem(void *s, size_t count) ...@@ -324,7 +324,7 @@ static inline void poison_init_mem(void *s, size_t count)
*p++ = 0xe7fddef0; *p++ = 0xe7fddef0;
} }
static inline void static inline void __init
free_memmap(unsigned long start_pfn, unsigned long end_pfn) free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{ {
struct page *start_pg, *end_pg; struct page *start_pg, *end_pg;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment