Commit 7e40b56c authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 "Some small fixes that have been accumulated:

   - Chris Cole noticed that in a SMP environment, the DMA cache
     coherence handling can produce undesirable results in a corner
     case

   - Propagate that fix for ARMv7M as well

   - Fix a false positive with source fortification

   - Fix an uninitialised return that Nathan Jones spotted"

* 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8816/1: dma-mapping: fix potential uninitialized return
  ARM: 8815/1: V7M: align v7m_dma_inv_range() with v7 counterpart
  ARM: 8814/1: mm: improve/fix ARM v7_dma_inv_range() unaligned address handling
  ARM: 8806/1: kprobes: Fix false positive with FORTIFY_SOURCE
parents abb8d6ec c2a3831d
@@ -360,14 +360,16 @@ v7_dma_inv_range:
 ALT_UP(W(nop))
 #endif
 	mcrne	p15, 0, r0, c7, c14, 1	@ clean & invalidate D / U line
+	addne	r0, r0, r2
 	tst	r1, r3
 	bic	r1, r1, r3
 	mcrne	p15, 0, r1, c7, c14, 1	@ clean & invalidate D / U line
-1:
-	mcr	p15, 0, r0, c7, c6, 1	@ invalidate D / U line
-	add	r0, r0, r2
 	cmp	r0, r1
+1:
+	mcrlo	p15, 0, r0, c7, c6, 1	@ invalidate D / U line
+	addlo	r0, r0, r2
+	cmplo	r0, r1
 	blo	1b
 	dsb	st
 	ret	lr
...
@@ -73,9 +73,11 @@
 /*
  * dcimvac: Invalidate data cache line by MVA to PoC
  */
-.macro dcimvac, rt, tmp
-	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
+.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+.macro dcimvac\c, rt, tmp
+	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
 .endm
+.endr
 /*
  * dccmvau: Clean data cache line by MVA to PoU
@@ -369,14 +371,16 @@ v7m_dma_inv_range:
 	tst	r0, r3
 	bic	r0, r0, r3
 	dccimvacne r0, r3
+	addne	r0, r0, r2
 	subne	r3, r2, #1	@ restore r3, corrupted by v7m's dccimvac
 	tst	r1, r3
 	bic	r1, r1, r3
 	dccimvacne r1, r3
-1:
-	dcimvac r0, r3
-	add	r0, r0, r2
 	cmp	r0, r1
+1:
+	dcimvaclo r0, r3
+	addlo	r0, r0, r2
+	cmplo	r0, r1
 	blo	1b
 	dsb	st
 	ret	lr
...
@@ -829,7 +829,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
 {
-	int ret;
+	int ret = -ENXIO;
 	unsigned long nr_vma_pages = vma_pages(vma);
 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long pfn = dma_to_pfn(dev, dma_addr);
...
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
 	}
 	/* Copy arch-dep-instance from template. */
-	memcpy(code, &optprobe_template_entry,
+	memcpy(code, (unsigned char *)optprobe_template_entry,
 	       TMPL_END_IDX * sizeof(kprobe_opcode_t));
 	/* Adjust buffer according to instruction. */
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment