Commit a5e90b1b authored by Linus Torvalds

Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 "Further ARM fixes:
   - Anson Huang noticed that we were corrupting a register we shouldn't
     be during suspend on some CPUs.
   - Shengjiu Wang spotted a bug in the 'swp' instruction emulation.
   - Will Deacon fixed a bug in the ASID allocator.
   - Laura Abbott fixed the kernel permission protection to apply to all
     threads running in the system.
   - I've fixed two bugs with the domain access control register
     handling, one to do with printing an appropriate value at oops
     time, and the other to further fix the uaccess_with_memcpy code"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8475/1: SWP emulation: Restore original *data when failed
  ARM: 8471/1: need to save/restore arm register(r11) when it is corrupted
  ARM: fix uaccess_with_memcpy() with SW_DOMAIN_PAN
  ARM: report proper DACR value in oops dumps
  ARM: 8464/1: Update all mm structures with section adjustments
  ARM: 8465/1: mm: keep reserved ASIDs in sync with mm after multiple rollovers
parents edb42dc7 34bfbae3
@@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
        unsigned int __ua_flags = uaccess_save_and_enable();
        n = arm_copy_to_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
+#else
+       return arm_copy_to_user(to, from, n);
+#endif
 }
 
 extern unsigned long __must_check
...
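With CONFIG_UACCESS_WITH_MEMCPY enabled, arm_copy_to_user() resolves to the memcpy-based implementation, which now opens the user-access window itself around the actual copies (see the uaccess_with_memcpy.c hunks further down), so the inline wrapper must not open it a second time and leave userspace accessible across the page-table walk. A minimal userspace model of the bracket used on the non-memcpy path; pan_open()/pan_close() are hypothetical stand-ins for uaccess_save_and_enable()/uaccess_restore():

#include <string.h>

static unsigned int pan_open(void)    { return 1; }  /* returns previous state */
static void pan_close(unsigned int f) { (void)f; }   /* restores it */

/* Model of the !CONFIG_UACCESS_WITH_MEMCPY path: one open/close bracket
 * around the whole copy. */
static unsigned long copy_to_user_model(void *to, const void *from,
                                        unsigned long n)
{
        unsigned int flags = pan_open();
        memcpy(to, from, n);            /* stands in for arm_copy_to_user() */
        pan_close(flags);
        return 0;                       /* bytes not copied */
}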
@@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
 {
        unsigned long flags;
        char buf[64];
+#ifndef CONFIG_CPU_V7M
+       unsigned int domain;
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       /*
+        * Get the domain register for the parent context. In user
+        * mode, we don't save the DACR, so lets use what it should
+        * be. For other modes, we place it after the pt_regs struct.
+        */
+       if (user_mode(regs))
+               domain = DACR_UACCESS_ENABLE;
+       else
+               domain = *(unsigned int *)(regs + 1);
+#else
+       domain = get_domain();
+#endif
+#endif
 
        show_regs_print_info(KERN_DEFAULT);
@@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs)
 
 #ifndef CONFIG_CPU_V7M
        {
-               unsigned int domain = get_domain();
                const char *segment;
 
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-               /*
-                * Get the domain register for the parent context. In user
-                * mode, we don't save the DACR, so lets use what it should
-                * be. For other modes, we place it after the pt_regs struct.
-                */
-               if (user_mode(regs))
-                       domain = DACR_UACCESS_ENABLE;
-               else
-                       domain = *(unsigned int *)(regs + 1);
-#endif
-
                if ((domain & domain_mask(DOMAIN_USER)) ==
                    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
                        segment = "none";
@@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs)
        buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
        {
-               unsigned int transbase, dac = get_domain();
+               unsigned int transbase;
                asm("mrc p15, 0, %0, c2, c0\n\t"
                    : "=r" (transbase));
                snprintf(buf, sizeof(buf), " Table: %08x  DAC: %08x",
-                       transbase, dac);
+                       transbase, domain);
        }
 #endif
        asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
...
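Under CONFIG_CPU_SW_DOMAIN_PAN the live DACR has already been rewritten by the uaccess helpers by the time __show_regs() runs, so the dump now decodes the parent context's value instead (DACR_UACCESS_ENABLE for user mode, otherwise the copy stashed just after pt_regs). As a reminder of what the printed "DAC: %08x" encodes, here is a small standalone sketch of the 2-bits-per-domain layout and the none/user/kernel strings; the domain number and helper names are illustrative, not the kernel's:

#include <stdio.h>

/* The DACR gives each of the 16 ARM domains a 2-bit field:
 * 0 = no access, 1 = client (permission-checked), 3 = manager (unchecked). */
#define DOMAIN_NOACCESS 0u
#define DOMAIN_CLIENT   1u
#define DOMAIN_MANAGER  3u

static unsigned int dacr_field(unsigned int dacr, unsigned int domain)
{
        return (dacr >> (2 * domain)) & 3u;
}

static const char *segment_name(unsigned int dacr, unsigned int user_domain)
{
        switch (dacr_field(dacr, user_domain)) {
        case DOMAIN_NOACCESS: return "none";
        case DOMAIN_CLIENT:   return "user";
        default:              return "kernel";  /* manager access */
        }
}

int main(void)
{
        unsigned int user_domain = 1;            /* assumed user domain number */
        unsigned int dacr = DOMAIN_CLIENT << (2 * user_domain);

        printf("DAC: %08x  Segment %s\n", dacr, segment_name(dacr, user_domain));
        return 0;
}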
@@ -36,10 +36,10 @@
  */
 #define __user_swpX_asm(data, addr, res, temp, B)              \
        __asm__ __volatile__(                                   \
-       "       mov             %2, %1\n"                       \
-       "0:     ldrex"B"        %1, [%3]\n"                     \
-       "1:     strex"B"        %0, %2, [%3]\n"                 \
+       "0:     ldrex"B"        %2, [%3]\n"                     \
+       "1:     strex"B"        %0, %1, [%3]\n"                 \
        "       cmp             %0, #0\n"                       \
+       "       moveq           %1, %2\n"                       \
        "       movne           %0, %4\n"                       \
        "2:\n"                                                  \
        "       .section         .text.fixup,\"ax\"\n"          \
...
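The old sequence moved the register operand into the scratch register and then loaded the old memory value into %1 before knowing whether the store-exclusive (or its fault fixup) would succeed, so a failure left the caller's data already clobbered. The reordered sequence keeps the loaded value in the scratch register and only commits it to %1 once strex reports success (the moveq). A rough userspace model of the required SWP semantics, using a GCC atomic builtin in place of ldrex/strex (illustrative only, not the kernel code):

#include <errno.h>

/* Emulated SWP: atomically store *reg to *mem and hand the old memory value
 * back in *reg -- but leave *reg untouched if the access fails. */
static int swp_model(unsigned int *reg, unsigned int *mem,
                     int mem_ok /* stand-in for the exception-table fixup */)
{
        unsigned int newval = *reg;
        unsigned int oldval;

        if (!mem_ok)
                return -EFAULT;         /* *reg still holds its original value */

        oldval = __atomic_exchange_n(mem, newval, __ATOMIC_SEQ_CST);
        *reg = oldval;                  /* commit only after the store succeeded */
        return 0;
}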
@@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 static unsigned long noinline
 __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 {
+       unsigned long ua_flags;
        int atomic;
 
        if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
@@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
                if (tocopy > n)
                        tocopy = n;
 
+               ua_flags = uaccess_save_and_enable();
                memcpy((void *)to, from, tocopy);
+               uaccess_restore(ua_flags);
                to += tocopy;
                from += tocopy;
                n -= tocopy;
@@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
         * With frame pointer disabled, tail call optimization kicks in
         * as well making this test almost invisible.
         */
-       if (n < 64)
-               return __copy_to_user_std(to, from, n);
-       return __copy_to_user_memcpy(to, from, n);
+       if (n < 64) {
+               unsigned long ua_flags = uaccess_save_and_enable();
+               n = __copy_to_user_std(to, from, n);
+               uaccess_restore(ua_flags);
+       } else {
+               n = __copy_to_user_memcpy(to, from, n);
+       }
+       return n;
 }
 
 static unsigned long noinline
 __clear_user_memset(void __user *addr, unsigned long n)
 {
+       unsigned long ua_flags;
+
        if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
                memset((void *)addr, 0, n);
                return 0;
@@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
                if (tocopy > n)
                        tocopy = n;
 
+               ua_flags = uaccess_save_and_enable();
                memset((void *)addr, 0, tocopy);
+               uaccess_restore(ua_flags);
                addr += tocopy;
                n -= tocopy;
@@ -193,9 +205,14 @@ __clear_user_memset(void __user *addr, unsigned long n)
 unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
        /* See rational for this in __copy_to_user() above. */
-       if (n < 64)
-               return __clear_user_std(addr, n);
-       return __clear_user_memset(addr, n);
+       if (n < 64) {
+               unsigned long ua_flags = uaccess_save_and_enable();
+               n = __clear_user_std(addr, n);
+               uaccess_restore(ua_flags);
+       } else {
+               n = __clear_user_memset(addr, n);
+       }
+       return n;
 }
 
 #if 0
...
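The memcpy-based paths have to walk and pin the destination page tables with the user-access window closed, and may only open it around the memcpy()/memset() of each pinned chunk; the sub-64-byte fast path instead brackets the single __copy_to_user_std()/__clear_user_std() call. A condensed userspace model of the chunked shape, where open_window()/close_window() and pin_chunk() are hypothetical stand-ins for uaccess_save_and_enable()/uaccess_restore() and pin_page_for_write():

#include <string.h>

#define CHUNK 4096UL

static unsigned long open_window(void)        { return 1; }
static void close_window(unsigned long f)     { (void)f; }
static int pin_chunk(void *addr)              { (void)addr; return 1; }

static unsigned long copy_to_user_memcpy_model(char *to, const char *from,
                                               unsigned long n)
{
        while (n) {
                unsigned long tocopy = CHUNK < n ? CHUNK : n;
                unsigned long flags;

                if (!pin_chunk(to))           /* done with the window closed */
                        break;                /* real code would fault/fall back */

                flags = open_window();        /* open only around the copy */
                memcpy(to, from, tocopy);
                close_window(flags);

                to += tocopy;
                from += tocopy;
                n -= tocopy;
        }
        return n;                             /* bytes not copied */
}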
@@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
        __flush_icache_all();
 }
 
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
 {
        int cpu;
-       for_each_possible_cpu(cpu)
-               if (per_cpu(reserved_asids, cpu) == asid)
-                       return 1;
-       return 0;
+       bool hit = false;
+
+       /*
+        * Iterate over the set of reserved ASIDs looking for a match.
+        * If we find one, then we can update our mm to use newasid
+        * (i.e. the same ASID in the current generation) but we can't
+        * exit the loop early, since we need to ensure that all copies
+        * of the old ASID are updated to reflect the mm. Failure to do
+        * so could result in us missing the reserved ASID in a future
+        * generation.
+        */
+       for_each_possible_cpu(cpu) {
+               if (per_cpu(reserved_asids, cpu) == asid) {
+                       hit = true;
+                       per_cpu(reserved_asids, cpu) = newasid;
+               }
+       }
+
+       return hit;
 }
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
        u64 generation = atomic64_read(&asid_generation);
 
        if (asid != 0) {
+               u64 newasid = generation | (asid & ~ASID_MASK);
+
                /*
                 * If our current ASID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
-               if (is_reserved_asid(asid))
-                       return generation | (asid & ~ASID_MASK);
+               if (check_update_reserved_asid(asid, newasid))
+                       return newasid;
 
                /*
                 * We had a valid ASID in a previous life, so try to re-use
@@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
                 */
                asid &= ~ASID_MASK;
                if (!__test_and_set_bit(asid, asid_map))
-                       goto bump_gen;
+                       return newasid;
        }
 
        /*
@@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
        __set_bit(asid, asid_map);
        cur_idx = asid;
 
-bump_gen:
-       asid |= generation;
-
        cpumask_clear(mm_cpumask(mm));
-       return asid;
+       return asid | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
...
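The reason is_reserved_asid() became check_update_reserved_asid() is that, after several rollovers, the same stale ASID can be recorded in more than one CPU's reserved_asids slot; every copy has to be rewritten to the new generation, which is why the loop records a hit but never breaks early. A small self-contained model of that invariant (array and sizes are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define NR_CPUS_MODEL 4

static uint64_t reserved_asids_model[NR_CPUS_MODEL];

/* Report whether the old ASID is reserved anywhere, and rewrite *every*
 * stale copy to newasid so no CPU is left holding the old-generation value. */
static bool check_update_reserved_asid_model(uint64_t asid, uint64_t newasid)
{
        bool hit = false;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS_MODEL; cpu++) {
                if (reserved_asids_model[cpu] == asid) {
                        hit = true;
                        reserved_asids_model[cpu] = newasid;
                        /* no break: another CPU may hold the same stale value */
                }
        }
        return hit;
}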
@@ -22,6 +22,7 @@
 #include <linux/memblock.h>
 #include <linux/dma-contiguous.h>
 #include <linux/sizes.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cp15.h>
 #include <asm/mach-types.h>
@@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
  * safe to be called with preemption disabled, as under stop_machine().
  */
 static inline void section_update(unsigned long addr, pmdval_t mask,
-                                 pmdval_t prot)
+                                 pmdval_t prot, struct mm_struct *mm)
 {
-       struct mm_struct *mm;
        pmd_t *pmd;
 
-       mm = current->active_mm;
        pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
 
 #ifdef CONFIG_ARM_LPAE
@@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
        return !!(get_cr() & CR_XP);
 }
 
-#define set_section_perms(perms, field)        {                       \
-       size_t i;                                                       \
-       unsigned long addr;                                             \
-                                                                       \
-       if (!arch_has_strict_perms())                                   \
-               return;                                                 \
-                                                                       \
-       for (i = 0; i < ARRAY_SIZE(perms); i++) {                       \
-               if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||        \
-                   !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {          \
-                       pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
-                               perms[i].start, perms[i].end,           \
-                               SECTION_SIZE);                          \
-                       continue;                                       \
-               }                                                       \
-                                                                       \
-               for (addr = perms[i].start;                             \
-                    addr < perms[i].end;                               \
-                    addr += SECTION_SIZE)                              \
-                       section_update(addr, perms[i].mask,             \
-                                      perms[i].field);                 \
-       }                                                               \
+void set_section_perms(struct section_perm *perms, int n, bool set,
+                      struct mm_struct *mm)
+{
+       size_t i;
+       unsigned long addr;
+
+       if (!arch_has_strict_perms())
+               return;
+
+       for (i = 0; i < n; i++) {
+               if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
+                   !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
+                       pr_err("BUG: section %lx-%lx not aligned to %lx\n",
+                               perms[i].start, perms[i].end,
+                               SECTION_SIZE);
+                       continue;
+               }
+
+               for (addr = perms[i].start;
+                    addr < perms[i].end;
+                    addr += SECTION_SIZE)
+                       section_update(addr, perms[i].mask,
+                                      set ? perms[i].prot : perms[i].clear, mm);
+       }
 }
 
-static inline void fix_kernmem_perms(void)
+static void update_sections_early(struct section_perm perms[], int n)
 {
-       set_section_perms(nx_perms, prot);
+       struct task_struct *t, *s;
+
+       read_lock(&tasklist_lock);
+       for_each_process(t) {
+               if (t->flags & PF_KTHREAD)
+                       continue;
+               for_each_thread(t, s)
+                       set_section_perms(perms, n, true, s->mm);
+       }
+       read_unlock(&tasklist_lock);
+       set_section_perms(perms, n, true, current->active_mm);
+       set_section_perms(perms, n, true, &init_mm);
+}
+
+int __fix_kernmem_perms(void *unused)
+{
+       update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
+       return 0;
+}
+
+void fix_kernmem_perms(void)
+{
+       stop_machine(__fix_kernmem_perms, NULL, NULL);
 }
 
 #ifdef CONFIG_DEBUG_RODATA
+int __mark_rodata_ro(void *unused)
+{
+       update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
+       return 0;
+}
+
 void mark_rodata_ro(void)
 {
-       set_section_perms(ro_perms, prot);
+       stop_machine(__mark_rodata_ro, NULL, NULL);
 }
 
 void set_kernel_text_rw(void)
 {
-       set_section_perms(ro_perms, clear);
+       set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
+                         current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
-       set_section_perms(ro_perms, prot);
+       set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
+                         current->active_mm);
 }
 #endif /* CONFIG_DEBUG_RODATA */
...
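Each user process carries its own first-level page table, so flipping the NX/RO section bits only in current->active_mm (as the old macro did) left every other mm with the stale permissions; the new code walks every thread's mm under tasklist_lock (skipping kernel threads, which have no mm of their own), then covers current->active_mm and init_mm, and drives the update from stop_machine() so no CPU is executing through the affected mappings mid-change. A tiny userspace model of why the change must touch every mm (all names here are illustrative, not the kernel's):

#include <stddef.h>

/* Each "mm" owns its own first-level table, so a permission change only
 * becomes global once it has been applied to every mm. */
struct mm_model {
        unsigned int section_flags[16];   /* stand-in for first-level entries */
};

static void section_update_model(struct mm_model *mm, size_t idx,
                                 unsigned int mask, unsigned int prot)
{
        mm->section_flags[idx] = (mm->section_flags[idx] & mask) | prot;
}

/* The fix in a nutshell: apply the update to *all* mms, not just the one
 * that happens to be active when mark_rodata_ro() runs. */
static void update_sections_model(struct mm_model *mms, size_t nr_mm,
                                  size_t idx, unsigned int mask,
                                  unsigned int prot)
{
        for (size_t i = 0; i < nr_mm; i++)
                section_update_model(&mms[i], idx, mask, prot);
}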
@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
 .equ   cpu_v7_suspend_size, 4 * 9
 #ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v7_do_suspend)
-       stmfd   sp!, {r4 - r10, lr}
+       stmfd   sp!, {r4 - r11, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ FCSE/PID
        mrc     p15, 0, r5, c13, c0, 3  @ User r/o thread ID
        stmia   r0!, {r4 - r5}
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
        mrc     p15, 0, r9, c1, c0, 1   @ Auxiliary control register
        mrc     p15, 0, r10, c1, c0, 2  @ Co-processor access control
        stmia   r0, {r5 - r11}
-       ldmfd   sp!, {r4 - r10, pc}
+       ldmfd   sp!, {r4 - r11, pc}
 ENDPROC(cpu_v7_do_suspend)
 
 ENTRY(cpu_v7_do_resume)
...