Commit 46bcde94 authored by Christoph Hellwig

sh: split arch/sh/mm/consistent.c

Half of the file just contains platform device memory setup code which
is required for all builds, and half contains helpers for dma coherent
allocation, which is only needed if CONFIG_DMA_NONCOHERENT is enabled.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Yoshinori Sato <ysato@users.sourceforge.jp>
parent a602915f
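
For context on the consumer side: on an SH kernel built with CONFIG_DMA_NONCOHERENT=y, a driver's dma_alloc_coherent() call is ultimately serviced by the dma_generic_alloc_coherent() helper that this commit moves into its own file. Below is a minimal, hypothetical driver-side sketch; "my_dev" and the one-page buffer size are made up for illustration and do not appear in the commit:

/* Hypothetical usage sketch; "my_dev" is an illustrative device pointer.
 * On SH with CONFIG_DMA_NONCOHERENT=y, this call path ends up in
 * dma_generic_alloc_coherent()/dma_generic_free_coherent() below.
 */
#include <linux/dma-mapping.h>

static int my_setup_dma_buffer(struct device *my_dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* cpu_addr is an uncached CPU mapping; dma_handle is the
	 * device-visible bus address. */
	cpu_addr = dma_alloc_coherent(my_dev, PAGE_SIZE, &dma_handle,
				      GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program the device with dma_handle, touch cpu_addr ... */

	dma_free_coherent(my_dev, PAGE_SIZE, cpu_addr, dma_handle);
	return 0;
}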
arch/sh/kernel/Makefile
@@ -45,7 +45,7 @@ obj-$(CONFIG_DUMP_CODE) += disassemble.o
 obj-$(CONFIG_HIBERNATION) += swsusp.o
 obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o
-obj-$(CONFIG_DMA_NONCOHERENT) += dma-nommu.o
+obj-$(CONFIG_DMA_NONCOHERENT) += dma-nommu.o dma-coherent.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 ccflags-y := -Werror
arch/sh/kernel/dma-coherent.c (new file)
+/*
+ * Copyright (C) 2004 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+#include <asm/addrspace.h>
+
+void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+				 dma_addr_t *dma_handle, gfp_t gfp,
+				 unsigned long attrs)
+{
+	void *ret, *ret_nocache;
+	int order = get_order(size);
+
+	gfp |= __GFP_ZERO;
+
+	ret = (void *)__get_free_pages(gfp, order);
+	if (!ret)
+		return NULL;
+
+	/*
+	 * Pages from the page allocator may have data present in
+	 * cache. So flush the cache before using uncached memory.
+	 */
+	sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL);
+
+	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
+	if (!ret_nocache) {
+		free_pages((unsigned long)ret, order);
+		return NULL;
+	}
+
+	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
+
+	*dma_handle = virt_to_phys(ret);
+	if (!WARN_ON(!dev))
+		*dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
+
+	return ret_nocache;
+}
+
+void dma_generic_free_coherent(struct device *dev, size_t size,
+			       void *vaddr, dma_addr_t dma_handle,
+			       unsigned long attrs)
+{
+	int order = get_order(size);
+	unsigned long pfn = (dma_handle >> PAGE_SHIFT);
+	int k;
+
+	if (!WARN_ON(!dev))
+		pfn += dev->dma_pfn_offset;
+
+	for (k = 0; k < (1 << order); k++)
+		__free_pages(pfn_to_page(pfn + k), 0);
+
+	iounmap(vaddr);
+}
+
+void sh_sync_dma_for_device(void *vaddr, size_t size,
+			    enum dma_data_direction direction)
+{
+	void *addr = sh_cacheop_vaddr(vaddr);
+
+	switch (direction) {
+	case DMA_FROM_DEVICE:	/* invalidate only */
+		__flush_invalidate_region(addr, size);
+		break;
+	case DMA_TO_DEVICE:	/* writeback only */
+		__flush_wback_region(addr, size);
+		break;
+	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
+		__flush_purge_region(addr, size);
+		break;
+	default:
+		BUG();
+	}
+}
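
A note on the allocator just shown (the code is only moved by this commit, not changed): __get_free_pages() returns one high-order block, while dma_generic_free_coherent() frees the region page by page. The intervening split_page() call is what turns the high-order block into independent order-0 pages so that per-page freeing is legal. A worked sketch of the order arithmetic, assuming 4 KiB pages; the function and its values are illustrative, not part of the commit:

#include <linux/mm.h>	/* get_order(), split_page(), __free_pages() */

/* Worked example of the rounding, assuming PAGE_SIZE == 4096. */
static void __maybe_unused order_demo(void)
{
	size_t size = 6000;		/* hypothetical request size    */
	int order = get_order(size);	/* == 1: 6000 B rounds up to    */
					/* 2^1 = 2 pages, i.e. 8 KiB    */

	/* split_page() makes those two pages independent, so the free
	 * path can release them one at a time with __free_pages(page, 0),
	 * (1 << order) == 2 times, without remembering "order".
	 */
	(void)order;
}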
arch/sh/mm/consistent.c
@@ -1,10 +1,6 @@
 /*
- * arch/sh/mm/consistent.c
- *
  * Copyright (C) 2004 - 2007 Paul Mundt
  *
- * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -13,83 +9,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-debug.h>
 #include <linux/io.h>
-#include <linux/module.h>
-#include <linux/gfp.h>
-#include <asm/cacheflush.h>
-#include <asm/addrspace.h>
-
-void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_handle, gfp_t gfp,
-				 unsigned long attrs)
-{
-	void *ret, *ret_nocache;
-	int order = get_order(size);
-
-	gfp |= __GFP_ZERO;
-
-	ret = (void *)__get_free_pages(gfp, order);
-	if (!ret)
-		return NULL;
-
-	/*
-	 * Pages from the page allocator may have data present in
-	 * cache. So flush the cache before using uncached memory.
-	 */
-	sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL);
-
-	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
-	if (!ret_nocache) {
-		free_pages((unsigned long)ret, order);
-		return NULL;
-	}
-
-	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
-
-	*dma_handle = virt_to_phys(ret);
-	if (!WARN_ON(!dev))
-		*dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
-
-	return ret_nocache;
-}
-
-void dma_generic_free_coherent(struct device *dev, size_t size,
-			       void *vaddr, dma_addr_t dma_handle,
-			       unsigned long attrs)
-{
-	int order = get_order(size);
-	unsigned long pfn = dma_handle >> PAGE_SHIFT;
-	int k;
-
-	if (!WARN_ON(!dev))
-		pfn += dev->dma_pfn_offset;
-
-	for (k = 0; k < (1 << order); k++)
-		__free_pages(pfn_to_page(pfn + k), 0);
-
-	iounmap(vaddr);
-}
-
-void sh_sync_dma_for_device(void *vaddr, size_t size,
-			    enum dma_data_direction direction)
-{
-	void *addr = sh_cacheop_vaddr(vaddr);
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:	/* invalidate only */
-		__flush_invalidate_region(addr, size);
-		break;
-	case DMA_TO_DEVICE:	/* writeback only */
-		__flush_wback_region(addr, size);
-		break;
-	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
-		__flush_purge_region(addr, size);
-		break;
-	default:
-		BUG();
-	}
-}
 
 static int __init memchunk_setup(char *str)
 {
...
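
What remains in consistent.c after this commit (truncated above) is the platform device memory setup half that every SH build needs. The memchunk_setup() stub whose first lines are visible registers a "memchunk." kernel-parameter prefix; boot arguments such as memchunk.vpu=8m are then re-parsed later to resize per-device memory chunks reserved by platform_resource_setup_memory(). A sketch of that registration pattern follows; since the actual code is truncated here, the handler body is an illustrative reconstruction, not the verbatim source:

/* Illustrative reconstruction of the truncated hook, not the verbatim
 * source: the handler accepts any "memchunk.<name>=<size>" argument
 * and leaves the real parsing to a later scan of boot_command_line.
 */
#include <linux/init.h>

static int __init memchunk_setup(char *str)
{
	return 1;	/* accept anything beginning with "memchunk." */
}
__setup("memchunk.", memchunk_setup);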