Commit e946b209 authored by Colin Cross, committed by Greg Kroah-Hartman

ion: fix dma APIs

__dma_page_cpu_to_dev is a private ARM API that is not available
in the 3.10 kernel and was never available on other architectures.
We can get the same behavior by calling dma_sync_sg_for_device with
a scatterlist containing a single page.  This is still not quite a
kosher use of the DMA APIs, since we still conflate physical
addresses with bus addresses, but it should at least compile on all
platforms, and it will work on any platform that doesn't have a
physical-to-bus address translation.
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent a14baf71
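For context, the replacement pattern is simple: wrap the page in a one-entry
scatterlist and pass it to the generic dma_sync_sg_for_device().  A minimal
standalone sketch of the idea (the helper name here is illustrative; the
commit's actual helper is ion_pages_sync_for_device, shown in the first hunk
below, and the physical-vs-bus address caveat in its code comment applies
here too):

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void sync_page_for_device(struct device *dev, struct page *page,
				 size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	/* Build a scatterlist containing just this one page. */
	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/* Assumes bus address == physical address; see the commit's comment. */
	sg_dma_address(&sg) = page_to_phys(page);
	/* Let the generic DMA layer do the cache maintenance. */
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}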
@@ -840,6 +840,22 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 {
 }
 
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+		size_t size, enum dma_data_direction dir)
+{
+	struct scatterlist sg;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, page, size, 0);
+	/*
+	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+	 * for the targeted device, but this works on the currently targeted
+	 * hardware.
+	 */
+	sg_dma_address(&sg) = page_to_phys(page);
+	dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
 struct ion_vma_list {
 	struct list_head list;
 	struct vm_area_struct *vma;
@@ -864,7 +880,9 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
 		struct page *page = buffer->pages[i];
 
 		if (ion_buffer_page_is_dirty(page))
-			__dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
+			ion_pages_sync_for_device(dev, ion_buffer_page(page),
+						  PAGE_SIZE, dir);
+
 		ion_buffer_page_clean(buffer->pages + i);
 	}
 	list_for_each_entry(vma_list, &buffer->vmas, list) {
...
@@ -106,11 +106,11 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 	ion_heap_buffer_zero(buffer);
 
-	for_each_sg(table->sgl, sg, table->nents, i) {
-		if (ion_buffer_cached(buffer))
-			arm_dma_ops.sync_single_for_device(NULL,
-				pfn_to_dma(NULL, page_to_pfn(sg_page(sg))),
-				sg_dma_len(sg), DMA_BIDIRECTIONAL);
+	if (ion_buffer_cached(buffer))
+		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+				       DMA_BIDIRECTIONAL);
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
 		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
 			      sg_dma_len(sg));
 	}
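Worth noting in the hunk above: the cache maintenance moves out of the
per-entry loop into a single dma_sync_sg_for_device() call over the whole
table, so the generic DMA layer walks the scatterlist instead of the driver
syncing each chunk by hand.  The resulting free path, sketched in isolation
(assuming buffer, table, and chunk_heap as in the surrounding code):

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	for_each_sg(table->sgl, sg, table->nents, i)
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg_dma_len(sg));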
@@ -148,7 +148,6 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
 	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
 	int i, ret;
 
-
 	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
 	if (!chunk_heap)
 		return ERR_PTR(-ENOMEM);
@@ -181,9 +180,9 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
 	}
 	free_vm_area(vm_struct);
 
-	arm_dma_ops.sync_single_for_device(NULL,
-		pfn_to_dma(NULL, page_to_pfn(phys_to_page(heap_data->base))),
-		heap_data->size, DMA_BIDIRECTIONAL);
+	ion_pages_sync_for_device(NULL, pfn_to_page(PFN_DOWN(heap_data->base)),
+				  heap_data->size, DMA_BIDIRECTIONAL);
+
 	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
 	chunk_heap->heap.ops = &chunk_heap_ops;
 	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
...
@@ -34,12 +34,7 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 	if (!page)
 		return NULL;
-	/* this is only being used to flush the page for dma,
-	   this api is not really suitable for calling from a driver
-	   but no better way to flush a page for dma exist at this time */
-	arm_dma_ops.sync_single_for_device(NULL,
-					   pfn_to_dma(NULL, page_to_pfn(page)),
-					   PAGE_SIZE << pool->order,
-					   DMA_BIDIRECTIONAL);
+	ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+				  DMA_BIDIRECTIONAL);
 
 	return page;
 }
...
@@ -17,6 +17,7 @@
 #ifndef _ION_PRIV_H
 #define _ION_PRIV_H
 
+#include <linux/dma-direction.h>
 #include <linux/kref.h>
 #include <linux/mm_types.h>
 #include <linux/mutex.h>
@@ -357,4 +358,15 @@ void ion_page_pool_free(struct ion_page_pool *, struct page *);
 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 			 int nr_to_scan);
 
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ *                             device
+ * @dev:	the device the pages will be used with
+ * @page:	the first page to be flushed
+ * @size:	size in bytes of region to be flushed
+ * @dir:	direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+		size_t size, enum dma_data_direction dir);
+
 #endif /* _ION_PRIV_H */
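With the declaration exported in ion_priv.h, any heap can flush freshly
allocated pages before handing them out, as the system heap hunk below does.
A hypothetical call site (the NULL device is what ion actually passes, since
at allocation time it does not yet know which device will map the buffer):

	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (page)
		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
					  DMA_BIDIRECTIONAL);

Passing NULL for the device is part of why the commit message calls this not
quite kosher: the sync happens without knowing the DMA constraints of the
buffer's eventual user.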
@@ -77,9 +77,8 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 		page = ion_heap_alloc_pages(buffer, gfp_flags, order);
 		if (!page)
 			return 0;
-		arm_dma_ops.sync_single_for_device(NULL,
-						   pfn_to_dma(NULL, page_to_pfn(page)),
-						   PAGE_SIZE << order, DMA_BIDIRECTIONAL);
+		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+					  DMA_BIDIRECTIONAL);
 	}
 	if (!page)
 		return 0;
...