Commit d1256667 authored by Linus Torvalds

Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7314/1: kuser: consistently use usr_ret for returning from helpers
  ARM: 7302/1: Add TLB flushing for both entries in a PMD
  ARM: 7303/1: perf: add empty NODE event definitions for Cortex-A5 and Cortex-A15
  ARM: 7308/1: vfp: flush thread hwstate before copying ptrace registers
  ARM: 7307/1: vfp: fix ptrace regset modification race
  ARM: 7306/1: vfp: flush thread hwstate before restoring context from sigframe
  Revert "ARM: 7304/1: ioremap: fix boundary check when reusing static mapping"
parents 7c7ed8ec 5a97d0ae
@@ -198,7 +198,15 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
 	pgtable_page_dtor(pte);
-	tlb_add_flush(tlb, addr);
+
+	/*
+	 * With the classic ARM MMU, a pte page has two corresponding pmd
+	 * entries, each covering 1MB.
+	 */
+	addr &= PMD_MASK;
+	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
+	tlb_add_flush(tlb, addr + SZ_1M);
+
 	tlb_remove_page(tlb, pte);
 }
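
Context for the hunk above: with the classic 2-level ARM page tables, Linux folds two 1MB hardware sections into a single 2MB pmd, so a freed pte page is reachable through two hardware entries and an address in each 1MB half has to be queued for flushing. The stand-alone sketch below only reproduces that address arithmetic; PAGE_SIZE, SZ_1M, PMD_SIZE and PMD_MASK are defined locally with values typical of this configuration and merely stand in for the kernel macros.

/*
 * Minimal user-space sketch of the flush-address arithmetic used above.
 * The constants are assumptions mirroring the classic ARM MMU layout
 * (4 KiB pages, a Linux pmd spanning two 1 MiB hardware sections).
 */
#include <stdio.h>

#define PAGE_SIZE	0x1000UL		/* 4 KiB */
#define SZ_1M		0x100000UL		/* one 1 MiB hardware section */
#define PMD_SIZE	(2 * SZ_1M)		/* Linux pmd covers two sections */
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long addr = 0xbef9c000UL;	/* arbitrary example address */

	addr &= PMD_MASK;	/* align down to the 2 MiB Linux pmd */

	/*
	 * One address inside each 1 MiB hardware section: the last page of
	 * the first section and the first page of the second section.
	 */
	printf("flush %#lx (first section)\n", addr + SZ_1M - PAGE_SIZE);
	printf("flush %#lx (second section)\n", addr + SZ_1M);
	return 0;
}

Queuing one address per section extends the mmu_gather flush range so both hardware entries backing the freed pte page get invalidated.
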
@@ -790,7 +790,7 @@ __kuser_cmpxchg64:				@ 0xffff0f60
 	smp_dmb	arm
 	rsbs	r0, r3, #0			@ set returned val and C flag
 	ldmfd	sp!, {r4, r5, r6, r7}
-	bx	lr
+	usr_ret	lr

 #elif !defined(CONFIG_SMP)

@@ -469,6 +469,20 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 /*
@@ -579,6 +593,20 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 /*
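
The two hunks above fill in explicit NODE rows for the Cortex-A5 and Cortex-A15 cache maps; presumably the point of spelling them out is that omitted designated initializers default to 0, which is indistinguishable from a real event number. A rough sketch of how such a [type][op][result] table is typically consulted is given below. The config encoding (type | op << 8 | result << 16) follows the perf ABI for hardware cache events, but cache_map and map_cache_event() are illustrative names rather than kernel symbols, and the table is trimmed to the NODE rows only.

/*
 * Sketch of a cache-map lookup over a [type][op][result] table like the
 * ones extended above. Illustrative names; not the kernel's helper.
 */
#include <stdio.h>

#define CACHE_OP_UNSUPPORTED	0xFFFF

enum { C_L1D, C_L1I, C_LL, C_DTLB, C_ITLB, C_BPU, C_NODE, C_MAX };
enum { OP_READ, OP_WRITE, OP_PREFETCH, OP_MAX };
enum { RESULT_ACCESS, RESULT_MISS, RESULT_MAX };

static const unsigned cache_map[C_MAX][OP_MAX][RESULT_MAX] = {
	[C_NODE] = {
		[OP_READ]	= { CACHE_OP_UNSUPPORTED, CACHE_OP_UNSUPPORTED },
		[OP_WRITE]	= { CACHE_OP_UNSUPPORTED, CACHE_OP_UNSUPPORTED },
		[OP_PREFETCH]	= { CACHE_OP_UNSUPPORTED, CACHE_OP_UNSUPPORTED },
	},
};

static int map_cache_event(unsigned config)
{
	unsigned type   = config & 0xff;
	unsigned op     = (config >> 8) & 0xff;
	unsigned result = (config >> 16) & 0xff;
	unsigned ev;

	if (type >= C_MAX || op >= OP_MAX || result >= RESULT_MAX)
		return -1;

	ev = cache_map[type][op][result];
	if (ev == CACHE_OP_UNSUPPORTED)
		return -1;	/* reject rather than programming a bogus event */

	return (int)ev;
}

int main(void)
{
	unsigned cfg = C_NODE | (OP_READ << 8) | (RESULT_ACCESS << 16);

	printf("NODE read access maps to %d\n", map_cache_event(cfg));
	return 0;
}
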
@@ -699,10 +699,13 @@ static int vfp_set(struct task_struct *target,
 {
 	int ret;
 	struct thread_info *thread = task_thread_info(target);
-	struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
+	struct vfp_hard_struct new_vfp;
 	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
 	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
 
+	vfp_sync_hwstate(thread);
+	new_vfp = thread->vfpstate.hard;
+
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				 &new_vfp.fpregs,
 				 user_fpregs_offset,
@@ -723,9 +726,8 @@ static int vfp_set(struct task_struct *target,
 	if (ret)
 		return ret;
 
-	vfp_sync_hwstate(thread);
-	thread->vfpstate.hard = new_vfp;
 	vfp_flush_hwstate(thread);
+	thread->vfpstate.hard = new_vfp;
 
 	return 0;
 }
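
For reference, vfp_set() above is the kernel half of the regset write path; a tracer reaches it via PTRACE_SETREGSET with NT_ARM_VFP. The sketch below shows that user-space side. struct user_vfp_regs mirrors struct user_vfp (fpregs[32] plus fpscr), poke_fpscr() is an illustrative helper, and error handling is abbreviated.

/*
 * Hedged sketch of a tracer rewriting a stopped ARM tracee's FPSCR via the
 * regset interface serviced by vfp_set(). Assumes a 32-bit ARM target.
 */
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_ARM_VFP
#define NT_ARM_VFP	0x400	/* regset note type for the ARM VFP registers */
#endif

struct user_vfp_regs {
	unsigned long long	fpregs[32];
	unsigned long		fpscr;
};

int poke_fpscr(pid_t pid, unsigned long new_fpscr)
{
	struct user_vfp_regs vfp;
	struct iovec iov = { .iov_base = &vfp, .iov_len = sizeof(vfp) };

	/*
	 * Read-modify-write: on the kernel side, vfp_sync_hwstate() makes
	 * thread->vfpstate current before it is copied out, and (after the
	 * fix above) vfp_flush_hwstate() runs before the new values are
	 * installed, so the live hardware state cannot overwrite them.
	 */
	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_VFP, &iov) < 0)
		return -1;

	vfp.fpscr = new_fpscr;

	if (ptrace(PTRACE_SETREGSET, pid, (void *)NT_ARM_VFP, &iov) < 0)
		return -1;

	return 0;
}
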
@@ -227,6 +227,8 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
 		return -EINVAL;
 
+	vfp_flush_hwstate(thread);
+
 	/*
 	 * Copy the floating point registers. There can be unused
 	 * registers see asm/hwcap.h for details.
@@ -251,9 +253,6 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
 	__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
 	__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
 
-	if (!err)
-		vfp_flush_hwstate(thread);
-
 	return err ? -EFAULT : 0;
 }

@@ -225,8 +225,7 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
 			continue;
 		if (__phys_to_pfn(area->phys_addr) > pfn ||
-		    __pfn_to_phys(pfn) + offset + size-1 >
-		    area->phys_addr + area->size-1)
+		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
 			continue;
 		/* we can drop the lock here as we know *area is static */
 		read_unlock(&vmlist_lock);
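
The reverted hunk above restores the original containment test: a static mapping is only reused when the requested physical range lies entirely inside [area->phys_addr, area->phys_addr + area->size). A simplified sketch of that last-byte comparison follows; it works at byte granularity rather than the pfn comparison used above, and range_contains() is an illustrative helper, not a kernel function.

/*
 * Sketch of the range-containment test. Comparing last bytes
 * (start + size - 1) rather than one-past-the-end values avoids wrapping
 * to 0 when a range ends at the top of the address space.
 */
#include <stdbool.h>
#include <stdio.h>

static bool range_contains(unsigned long outer_start, unsigned long outer_size,
			   unsigned long inner_start, unsigned long inner_size)
{
	return outer_start <= inner_start &&
	       inner_start + inner_size - 1 <= outer_start + outer_size - 1;
}

int main(void)
{
	/* A 1 MiB static mapping at 0x40000000... */
	unsigned long area_phys = 0x40000000UL, area_size = 0x100000UL;

	/* ...covers a 4 KiB request inside it, but not one past its end. */
	printf("%d\n", range_contains(area_phys, area_size, 0x40001000UL, 0x1000UL)); /* 1 */
	printf("%d\n", range_contains(area_phys, area_size, 0x400ff000UL, 0x2000UL)); /* 0 */
	return 0;
}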