Commit 56b880e2 authored by Linus Torvalds

Merge branch 'fixes-for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping

Pull DMA-mapping fixes from Marek Szyprowski:
 "A set of minor fixes for dma-mapping code (ARM and x86) required for
  Contiguous Memory Allocator (CMA) patches merged in v3.5-rc1."

* 'fixes-for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  x86: dma-mapping: fix broken allocation when dma_mask has been provided
  ARM: dma-mapping: fix debug messages in dmabounce code
  ARM: mm: fix type of the arm_dma_limit global variable
  ARM: dma-mapping: Add missing static storage class specifier
parents 1043e3be c080e26e
@@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
         struct safe_buffer *buf;
         unsigned long off;
 
-        dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-                __func__, addr, off, sz, dir);
+        dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+                __func__, addr, sz, dir);
 
         buf = find_safe_buffer_dev(dev, addr, __func__);
         if (!buf)
@@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 
         BUG_ON(buf->direction != dir);
 
-        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+                __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                 buf->safe, buf->safe_dma_addr);
 
         DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
         struct safe_buffer *buf;
         unsigned long off;
 
-        dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-                __func__, addr, off, sz, dir);
+        dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+                __func__, addr, sz, dir);
 
         buf = find_safe_buffer_dev(dev, addr, __func__);
         if (!buf)
@@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 
         BUG_ON(buf->direction != dir);
 
-        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+                __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                 buf->safe, buf->safe_dma_addr);
 
         DO_STATS(dev->archdata.dmabounce->bounce_count++);
...
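The four dmabounce hunks above fix the debug messages: 'off' is only computed after find_safe_buffer_dev() has returned a buffer, so printing it in the entry dev_dbg() dumped an uninitialized stack variable. The fix drops it from the entry trace and reports it in the later per-buffer message instead. A minimal sketch of the resulting flow; the off computation is assumed from the surrounding dmabounce code and is not part of this diff:

        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;

        off = addr - buf->safe_dma_addr;        /* 'off' becomes meaningful only here */

        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);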
@@ -228,7 +228,7 @@ static pte_t **consistent_pte;
 
 #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
 
-unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
 
 void __init init_consistent_dma_size(unsigned long size)
 {
@@ -321,7 +321,7 @@ static struct arm_vmregion_head coherent_head = {
         .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
 };
 
-size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
 
 static int __init early_coherent_pool(char *p)
 {
...
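The two hunks above only add the static storage class: consistent_base and coherent_pool_size are used nowhere outside this file, so giving them internal linkage keeps them out of the kernel-wide symbol namespace (and quiets sparse, which warns about file-local symbols that lack a declaration). A generic illustration of the difference, with hypothetical names not taken from this patch:

        /* hypothetical a.c */
        unsigned long pool_base;                /* external linkage: visible to, and can clash with, other objects */

        /* hypothetical b.c */
        static unsigned long pool_base;         /* internal linkage: private to this translation unit */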
@@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
  * allocations. This must be the smallest DMA mask in the system,
  * so a successful GFP_DMA allocation will always satisfy this.
  */
-u32 arm_dma_limit;
+phys_addr_t arm_dma_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
         unsigned long dma_size)
...
@@ -62,7 +62,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-extern u32 arm_dma_limit;
+extern phys_addr_t arm_dma_limit;
 #else
 #define arm_dma_limit ((u32)~0)
 #endif
...
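The two arm_dma_limit hunks widen the variable from u32 to phys_addr_t so the ZONE_DMA ceiling can describe physical addresses beyond 4GiB (relevant once LPAE is in play), with the extern declaration updated to match the definition. Roughly, the limit serves as the floor for acceptable DMA masks; a paraphrased sketch of that use, not part of this diff:

        /* a device whose mask cannot even reach the ZONE_DMA ceiling cannot be supported */
        int dma_supported(struct device *dev, u64 mask)
        {
                if (mask < (u64)arm_dma_limit)  /* with u32 this comparison capped out at 4GiB */
                        return 0;
                return 1;
        }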
@@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                  struct dma_attrs *attrs)
 {
         unsigned long dma_mask;
-        struct page *page = NULL;
+        struct page *page;
         unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
         dma_addr_t addr;
 
@@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
         flag |= __GFP_ZERO;
 again:
+        page = NULL;
         if (!(flag & GFP_ATOMIC))
                 page = dma_alloc_from_contiguous(dev, count, get_order(size));
         if (!page)
...
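The two x86 hunks fix the retry path in dma_generic_alloc_coherent(): once an allocation above dma_mask has been freed and the code jumps back to again:, a pass that skips the CMA call (e.g. under GFP_ATOMIC) would still see the stale, already-freed page pointer, skip alloc_pages_node(), and hand back freed memory. Resetting page at the top of every pass closes that hole. A condensed sketch of the corrected flow; only the lines shown in the hunk above are verbatim, the rest is paraphrased from the surrounding function for context:

        again:
                page = NULL;                    /* forget any pointer left over from a failed pass */
                if (!(flag & GFP_ATOMIC))
                        page = dma_alloc_from_contiguous(dev, count, get_order(size));
                if (!page)
                        page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
                if (!page)
                        return NULL;

                addr = page_to_phys(page);
                if (addr + size > dma_mask) {   /* landed above what the device can address */
                        __free_pages(page, get_order(size));
                        if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
                                flag = (flag & ~GFP_DMA32) | GFP_DMA;
                                goto again;     /* the stale 'page' must not survive this jump */
                        }
                        return NULL;
                }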