Commit a6933571 authored by Christoph Hellwig

dma-mapping: simplify dma_init_coherent_memory

Return the allocated dma_coherent_mem structure, set the use_dma_pfn_offset
flag, and print the failure warning inside dma_init_coherent_memory instead
of leaving that to the callers.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Dillon Min <dillon.minfei@gmail.com>
parent 70d6aa0e
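
For readers unfamiliar with the calling convention this patch switches to: dma_init_coherent_memory() now returns either a valid pointer or an errno value encoded with ERR_PTR(), which callers unpack with IS_ERR()/PTR_ERR() instead of checking a separate int return code. Below is a minimal, self-contained userspace sketch of that idiom, not the actual kernel code; the ERR_PTR helpers are simplified re-implementations, and struct widget and make_widget() are hypothetical stand-ins used only for illustration.

/*
 * Minimal userspace sketch of the ERR_PTR return convention that the new
 * dma_init_coherent_memory() follows.  The helpers below are simplified
 * re-implementations for illustration; struct widget and make_widget()
 * are hypothetical stand-ins, not kernel APIs.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

/* Encode a negative errno value in the pointer itself. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

/* Recover the errno value from an encoded pointer. */
static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* True if the pointer is really an encoded error, not a valid object. */
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct widget {
	size_t size;
};

/* Return a usable object on success, or ERR_PTR(-errno) on failure. */
static struct widget *make_widget(size_t size)
{
	struct widget *w;

	if (!size)
		return ERR_PTR(-EINVAL);	/* reject bogus arguments up front */

	w = calloc(1, sizeof(*w));
	if (!w)
		return ERR_PTR(-ENOMEM);	/* allocation failure */

	w->size = size;
	return w;
}

int main(void)
{
	/* Callers test the result the same way dma_declare_coherent_memory()
	 * now does: IS_ERR() instead of a separate int return code. */
	struct widget *w = make_widget(0);

	if (IS_ERR(w))
		printf("make_widget failed: %ld\n", PTR_ERR(w));

	w = make_widget(64);
	if (!IS_ERR(w)) {
		printf("got a widget of %zu bytes\n", w->size);
		free(w);
	}
	return 0;
}

Returning the pointer directly lets the function own its entire failure path, which is why the pr_err() warning and the use_dev_dma_pfn_offset assignment can move inside it in the diff below.
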
@@ -37,51 +37,44 @@ static inline dma_addr_t dma_get_device_base(struct device *dev,
 	return mem->device_base;
 }
 
-static int dma_init_coherent_memory(phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size,
-		struct dma_coherent_mem **mem)
+static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr,
+		dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)
 {
-	struct dma_coherent_mem *dma_mem = NULL;
-	void *mem_base = NULL;
+	struct dma_coherent_mem *dma_mem;
 	int pages = size >> PAGE_SHIFT;
 	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-	int ret;
+	void *mem_base;
 
-	if (!size) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (!size)
+		return ERR_PTR(-EINVAL);
 
 	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
-	if (!mem_base) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (!mem_base)
+		return ERR_PTR(-EINVAL);
+
 	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-	if (!dma_mem) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!dma_mem)
+		goto out_unmap_membase;
 	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!dma_mem->bitmap) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!dma_mem->bitmap)
+		goto out_free_dma_mem;
 
 	dma_mem->virt_base = mem_base;
 	dma_mem->device_base = device_addr;
 	dma_mem->pfn_base = PFN_DOWN(phys_addr);
 	dma_mem->size = pages;
+	dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset;
 	spin_lock_init(&dma_mem->spinlock);
 
-	*mem = dma_mem;
-	return 0;
+	return dma_mem;
 
-out:
+out_free_dma_mem:
 	kfree(dma_mem);
-	if (mem_base)
-		memunmap(mem_base);
-	return ret;
+out_unmap_membase:
+	memunmap(mem_base);
+	pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n",
+			&phys_addr, size / SZ_1M);
+	return ERR_PTR(-ENOMEM);
 }
 
 static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
@@ -130,9 +123,9 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 	struct dma_coherent_mem *mem;
 	int ret;
 
-	ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
-	if (ret)
-		return ret;
+	mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
+	if (IS_ERR(mem))
+		return PTR_ERR(mem);
 
 	ret = dma_assign_coherent_memory(dev, mem);
 	if (ret)
@@ -319,21 +312,16 @@ static struct reserved_mem *dma_reserved_default_memory __initdata;
 
 static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
 {
-	struct dma_coherent_mem *mem = rmem->priv;
-	int ret;
-
-	if (!mem) {
-		ret = dma_init_coherent_memory(rmem->base, rmem->base,
-					       rmem->size, &mem);
-		if (ret) {
-			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
-				&rmem->base, (unsigned long)rmem->size / SZ_1M);
-			return ret;
-		}
+	if (!rmem->priv) {
+		struct dma_coherent_mem *mem;
+
+		mem = dma_init_coherent_memory(rmem->base, rmem->base,
+					       rmem->size, true);
+		if (IS_ERR(mem))
+			return PTR_ERR(mem);
+		rmem->priv = mem;
 	}
-	mem->use_dev_dma_pfn_offset = true;
-	rmem->priv = mem;
-	dma_assign_coherent_memory(dev, mem);
+	dma_assign_coherent_memory(dev, rmem->priv);
 	return 0;
 }
 