Commit 29ae6bc7 authored by Rebecca Schultz Zavin, committed by Greg Kroah-Hartman

gpu: ion: Allocate the sg_table at creation time rather than dynamically

Rather than calling map_dma on the allocations dynamically, this patch
switches to creating the sg_table at the time the buffer is created.
This is necessary because in future updates the sg_table will be used
for cache maintenance.
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0f34faf8
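
Before the diff, a minimal user-space C sketch of the pattern this commit adopts may help: the (stand-in) scatterlist table is built once when the buffer is created, so a later "map" call is a plain lookup instead of a lazy, refcounted allocation. All names below (fake_sg_table, buffer_create, buffer_map) are hypothetical illustrations, not the ION API.

/*
 * Hypothetical user-space analogue of this commit's pattern: build the
 * (stand-in) sg_table eagerly at buffer-creation time, so "map" is just
 * a lookup. Not the kernel API; illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_sg_table {
        int nents;                      /* stand-in for scatterlist entries */
};

struct buffer {
        size_t size;
        struct fake_sg_table *table;    /* created eagerly, cached here */
};

static struct buffer *buffer_create(size_t size)
{
        struct buffer *buf = calloc(1, sizeof(*buf));

        if (!buf)
                return NULL;
        /* Eager: the table exists for the buffer's whole lifetime. */
        buf->table = calloc(1, sizeof(*buf->table));
        if (!buf->table) {
                free(buf);
                return NULL;
        }
        buf->table->nents = 1;
        buf->size = size;
        return buf;
}

/* "map" no longer allocates; it hands back the cached table. */
static struct fake_sg_table *buffer_map(struct buffer *buf)
{
        return buf->table;
}

int main(void)
{
        struct buffer *buf = buffer_create(4096);

        if (!buf)
                return 1;
        printf("mapped table with %d entry\n", buffer_map(buf)->nents);
        free(buf->table);
        free(buf);
        return 0;
}

The trade-off mirrors the commit: mapping becomes trivial and always available (which the planned cache-maintenance code relies on), at the cost of creating the table even for buffers that are never mapped.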
@@ -135,6 +135,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                      unsigned long flags)
 {
         struct ion_buffer *buffer;
+        struct sg_table *table;
         int ret;
 
         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
@@ -149,6 +150,15 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                 kfree(buffer);
                 return ERR_PTR(ret);
         }
+
+        table = buffer->heap->ops->map_dma(buffer->heap, buffer);
+        if (IS_ERR_OR_NULL(table)) {
+                heap->ops->free(buffer);
+                kfree(buffer);
+                return ERR_PTR(PTR_ERR(table));
+        }
+        buffer->sg_table = table;
+
         buffer->dev = dev;
         buffer->size = len;
         mutex_init(&buffer->lock);
@@ -164,9 +174,7 @@ static void ion_buffer_destroy(struct kref *kref)
 
         if (WARN_ON(buffer->kmap_cnt > 0))
                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-        if (WARN_ON(buffer->dmap_cnt > 0))
-                buffer->heap->ops->unmap_dma(buffer->heap, buffer);
-
+        buffer->heap->ops->unmap_dma(buffer->heap, buffer);
         buffer->heap->ops->free(buffer);
         mutex_lock(&dev->lock);
         rb_erase(&buffer->node, &dev->buffers);
@@ -346,6 +354,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 
                 mutex_unlock(&client->lock);
         }
 
+
         return handle;
 }
@@ -607,53 +616,42 @@ void ion_client_destroy(struct ion_client *client)
         kfree(client);
 }
 
-static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
-                                        enum dma_data_direction direction)
+struct sg_table *ion_map_dma(struct ion_client *client,
+                             struct ion_handle *handle)
 {
-        struct dma_buf *dmabuf = attachment->dmabuf;
-        struct ion_buffer *buffer = dmabuf->priv;
+        struct ion_buffer *buffer;
         struct sg_table *table;
 
-        mutex_lock(&buffer->lock);
-
-        if (!buffer->heap->ops->map_dma) {
-                pr_err("%s: map_dma is not implemented by this heap.\n",
+        mutex_lock(&client->lock);
+        if (!ion_handle_validate(client, handle)) {
+                pr_err("%s: invalid handle passed to map_dma.\n",
                        __func__);
-                mutex_unlock(&buffer->lock);
-                return ERR_PTR(-ENODEV);
-        }
-
-        /* if an sg list already exists for this buffer just return it */
-        if (buffer->dmap_cnt) {
-                table = buffer->sg_table;
-                goto end;
+                mutex_unlock(&client->lock);
+                return ERR_PTR(-EINVAL);
         }
-
-        /* otherwise call into the heap to create one */
-        table = buffer->heap->ops->map_dma(buffer->heap, buffer);
-        if (IS_ERR_OR_NULL(table))
-                goto err;
-        buffer->sg_table = table;
-end:
-        buffer->dmap_cnt++;
-err:
-        mutex_unlock(&buffer->lock);
+        buffer = handle->buffer;
+        table = buffer->sg_table;
+        mutex_unlock(&client->lock);
         return table;
 }
 
-static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
-                              struct sg_table *table,
-                              enum dma_data_direction direction)
+void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
+{
+}
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+                                        enum dma_data_direction direction)
 {
         struct dma_buf *dmabuf = attachment->dmabuf;
         struct ion_buffer *buffer = dmabuf->priv;
 
-        mutex_lock(&buffer->lock);
-        buffer->dmap_cnt--;
-        if (!buffer->dmap_cnt) {
-                buffer->heap->ops->unmap_dma(buffer->heap, buffer);
-                buffer->sg_table = NULL;
-        }
-        mutex_unlock(&buffer->lock);
+        return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+                              struct sg_table *table,
+                              enum dma_data_direction direction)
+{
 }
 
 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
@@ -987,6 +985,11 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
         struct rb_node *parent = NULL;
         struct ion_heap *entry;
 
+        if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+            !heap->ops->unmap_dma)
+                pr_err("%s: can not add heap with invalid ops struct.\n",
+                       __func__);
+
         heap->dev = dev;
         mutex_lock(&dev->lock);
         while (*p) {