Commit df6cf5c8 authored by Colin Cross, committed by Greg Kroah-Hartman

ion: add helper to zero contiguous region of pages

Add ion_heap_pages_zero, a helper that ion heaps can use to zero
pages during initialization or allocation, when a struct ion_buffer
may not be available. Use it from the chunk and carveout heaps.
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent ed5bf01a
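
The helper's call pattern, as a minimal sketch assembled from the call sites in the diff below (all identifiers come from the diff itself; heap_data is the struct ion_platform_heap argument passed to a heap's create function):

        struct page *page = pfn_to_page(PFN_DOWN(heap_data->base));
        size_t size = heap_data->size;
        int ret;

        /* make the region coherent for devices, then zero it through a
         * write-combined kernel mapping using the new helper */
        ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
        ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
        if (ret)
                return ERR_PTR(ret);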
@@ -150,6 +150,19 @@ static struct ion_heap_ops carveout_heap_ops = {
 struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
 {
         struct ion_carveout_heap *carveout_heap;
+        int ret;
+
+        struct page *page;
+        size_t size;
+
+        page = pfn_to_page(PFN_DOWN(heap_data->base));
+        size = heap_data->size;
+
+        ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+        ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+        if (ret)
+                return ERR_PTR(ret);
 
         carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
         if (!carveout_heap)
@@ -140,9 +140,18 @@ static struct ion_heap_ops chunk_heap_ops = {
 struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
 {
         struct ion_chunk_heap *chunk_heap;
-        struct vm_struct *vm_struct;
-        pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
-        int i, ret;
+        int ret;
+        struct page *page;
+        size_t size;
+
+        page = pfn_to_page(PFN_DOWN(heap_data->base));
+        size = heap_data->size;
+
+        ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+        ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+        if (ret)
+                return ERR_PTR(ret);
 
         chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
         if (!chunk_heap)
@@ -159,26 +168,6 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
         chunk_heap->size = heap_data->size;
         chunk_heap->allocated = 0;
 
-        vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
-        if (!vm_struct) {
-                ret = -ENOMEM;
-                goto error;
-        }
-        for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
-                struct page *page = pfn_to_page(PFN_DOWN(chunk_heap->base + i));
-                struct page **pages = &page;
-
-                ret = map_vm_area(vm_struct, pgprot, &pages);
-                if (ret)
-                        goto error_map_vm_area;
-                memset(vm_struct->addr, 0, PAGE_SIZE);
-                unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
-        }
-        free_vm_area(vm_struct);
-
-        ion_pages_sync_for_device(NULL, pfn_to_page(PFN_DOWN(heap_data->base)),
-                                  heap_data->size, DMA_BIDIRECTIONAL);
-
         gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
         chunk_heap->heap.ops = &chunk_heap_ops;
         chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
@@ -188,10 +177,6 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
 
         return &chunk_heap->heap;
 
-error_map_vm_area:
-        free_vm_area(vm_struct);
-error:
-        gen_pool_destroy(chunk_heap->pool);
 error_gen_pool_create:
         kfree(chunk_heap);
         return ERR_PTR(ret);
@@ -114,38 +114,49 @@ static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
         return 0;
 }
 
+static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+                                pgprot_t pgprot)
+{
+        int p = 0;
+        int ret = 0;
+        struct sg_page_iter piter;
+        struct page *pages[32];
+
+        for_each_sg_page(sgl, &piter, nents, 0) {
+                pages[p++] = sg_page_iter_page(&piter);
+                if (p == ARRAY_SIZE(pages)) {
+                        ret = ion_heap_clear_pages(pages, p, pgprot);
+                        if (ret)
+                                return ret;
+                        p = 0;
+                }
+        }
+        if (p)
+                ret = ion_heap_clear_pages(pages, p, pgprot);
+
+        return ret;
+}
+
 int ion_heap_buffer_zero(struct ion_buffer *buffer)
 {
         struct sg_table *table = buffer->sg_table;
         pgprot_t pgprot;
-        struct scatterlist *sg;
-        int i, j, ret = 0;
-        struct page *pages[32];
-        int k = 0;
 
         if (buffer->flags & ION_FLAG_CACHED)
                 pgprot = PAGE_KERNEL;
         else
                 pgprot = pgprot_writecombine(PAGE_KERNEL);
 
-        for_each_sg(table->sgl, sg, table->nents, i) {
-                struct page *page = sg_page(sg);
-                unsigned long len = sg->length;
-
-                for (j = 0; j < len / PAGE_SIZE; j++) {
-                        pages[k++] = page + j;
-                        if (k == ARRAY_SIZE(pages)) {
-                                ret = ion_heap_clear_pages(pages, k, pgprot);
-                                if (ret)
-                                        goto end;
-                                k = 0;
-                        }
-                }
-                if (k)
-                        ret = ion_heap_clear_pages(pages, k, pgprot);
-        }
-end:
-        return ret;
+        return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
+}
+
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
+{
+        struct scatterlist sg;
+
+        sg_init_table(&sg, 1);
+        sg_set_page(&sg, page, size, 0);
+        return ion_heap_sglist_zero(&sg, 1, pgprot);
 }
 
 void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
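
The body of ion_heap_clear_pages() is outside this diff (only its tail appears as context above). In the surrounding ION code it maps each batch of pages with vm_map_ram() and memsets it; a sketch, reproduced from the surrounding file rather than from this commit:

        static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
        {
                /* map the whole batch with one kernel mapping, zero it, unmap */
                void *addr = vm_map_ram(pages, num, -1, pgprot);

                if (!addr)
                        return -ENOMEM;
                memset(addr, 0, PAGE_SIZE * num);
                vm_unmap_ram(addr, num);

                return 0;
        }

This is why ion_heap_pages_zero() can wrap a contiguous region in a single scatterlist entry: for_each_sg_page() decomposes that one entry into its constituent pages, and ion_heap_sglist_zero() clears them 32 at a time, amortizing the kernel-mapping cost that the removed per-page get_vm_area()/map_vm_area() loop in the chunk heap paid on every page.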
@@ -215,6 +215,7 @@ void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
 int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
                       struct vm_area_struct *);
 int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
 
 /**
  * ion_heap_init_deferred_free -- initialize deferred free functionality