Commit 0f34faf8 authored by Rebecca Schultz Zavin, committed by Greg Kroah-Hartman

gpu: ion: support begin/end and kmap/kunmap dma_buf ops

These ops were added in the 3.4 kernel.  This patch adds support
for them to ion.  The previous ion_map/unmap_kernel API is retained
in addition to the new one.
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b15934b6
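
The new begin_cpu_access/end_cpu_access ops let a dma-buf importer pin ion's kernel mapping for the duration of a CPU access window; once pinned, the kmap/kunmap ops reduce to pointer arithmetic on buffer->vaddr. A minimal sketch of how a hypothetical importer would drive these ops through the 3.4-era dma-buf core API (example_cpu_read and its buffer handling are illustrative, not part of this patch):

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

/* Hypothetical importer: read CPU-visible data out of an ion-exported
 * dma-buf.  dma_buf_begin_cpu_access() reaches ion's new
 * ion_dma_buf_begin_cpu_access(), which pins the kernel mapping. */
static int example_cpu_read(struct dma_buf *dmabuf, size_t len)
{
	void *vaddr;
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* With the mapping pinned, kmap is just buffer->vaddr + offset. */
	vaddr = dma_buf_kmap(dmabuf, 0);
	/* ... consume the data at vaddr ... */
	dma_buf_kunmap(dmabuf, 0, vaddr);

	/* Drops the buffer-level reference via ion_buffer_kmap_put(). */
	dma_buf_end_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
	return 0;
}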
@@ -391,37 +391,40 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
 	return ret;
 }
 
-static void *ion_handle_kmap_get(struct ion_handle *handle)
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 {
-	struct ion_buffer *buffer = handle->buffer;
 	void *vaddr;
 
-	if (handle->kmap_cnt) {
-		handle->kmap_cnt++;
-		return buffer->vaddr;
-	} else if (buffer->kmap_cnt) {
-		handle->kmap_cnt++;
+	if (buffer->kmap_cnt) {
 		buffer->kmap_cnt++;
 		return buffer->vaddr;
 	}
-
 	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-	buffer->vaddr = vaddr;
-	if (IS_ERR_OR_NULL(vaddr)) {
-		buffer->vaddr = NULL;
+	if (IS_ERR_OR_NULL(vaddr))
 		return vaddr;
-	}
-	handle->kmap_cnt++;
+	buffer->vaddr = vaddr;
 	buffer->kmap_cnt++;
 	return vaddr;
 }
 
-static void ion_handle_kmap_put(struct ion_handle *handle)
+static void *ion_handle_kmap_get(struct ion_handle *handle)
 {
 	struct ion_buffer *buffer = handle->buffer;
+	void *vaddr;
+
+	if (handle->kmap_cnt) {
+		handle->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = ion_buffer_kmap_get(buffer);
+	if (IS_ERR_OR_NULL(vaddr))
+		return vaddr;
+	handle->kmap_cnt++;
+	return vaddr;
+}
 
-	handle->kmap_cnt--;
-	if (!handle->kmap_cnt)
-		buffer->kmap_cnt--;
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+	buffer->kmap_cnt--;
 	if (!buffer->kmap_cnt) {
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
@@ -429,6 +432,15 @@ static void ion_handle_kmap_put(struct ion_handle *handle)
 	}
 }
 
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+	struct ion_buffer *buffer = handle->buffer;
+
+	handle->kmap_cnt--;
+	if (!handle->kmap_cnt)
+		ion_buffer_kmap_put(buffer);
+}
+
 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
 {
 	struct ion_buffer *buffer;
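
The refactor above splits the kernel-mapping reference count in two: a per-handle count (ion_handle_kmap_get/put) and a per-buffer count (ion_buffer_kmap_get/put). A handle contributes exactly one buffer-level reference while its own count is non-zero, and the heap's map_kernel/unmap_kernel run only on the buffer count's 0->1 and 1->0 transitions. That is what lets a dma-buf importer, which holds no ion_handle, pin the mapping directly at the buffer level. A standalone model of the counting scheme (plain C, not kernel code; malloc/free stand in for the heap's map_kernel/unmap_kernel, and error paths are omitted):

#include <assert.h>
#include <stdlib.h>

struct buffer { int kmap_cnt; void *vaddr; };
struct handle { struct buffer *buffer; int kmap_cnt; };

static void *buffer_kmap_get(struct buffer *b)
{
	if (b->kmap_cnt++ == 0)
		b->vaddr = malloc(4096);	/* models heap->ops->map_kernel() */
	return b->vaddr;
}

static void buffer_kmap_put(struct buffer *b)
{
	if (--b->kmap_cnt == 0) {
		free(b->vaddr);			/* models heap->ops->unmap_kernel() */
		b->vaddr = NULL;
	}
}

static void *handle_kmap_get(struct handle *h)
{
	/* Only the handle's first user takes a buffer-level reference. */
	if (h->kmap_cnt++ == 0)
		return buffer_kmap_get(h->buffer);
	return h->buffer->vaddr;
}

static void handle_kmap_put(struct handle *h)
{
	if (--h->kmap_cnt == 0)
		buffer_kmap_put(h->buffer);
}

int main(void)
{
	struct buffer b = { 0, NULL };
	struct handle h = { &b, 0 };

	handle_kmap_get(&h);	/* client map_kernel: buffer count 0 -> 1 */
	buffer_kmap_get(&b);	/* dma-buf begin_cpu_access: 1 -> 2 */
	handle_kmap_put(&h);	/* handle gone, mapping survives: 2 -> 1 */
	assert(b.vaddr != NULL);
	buffer_kmap_put(&b);	/* dma-buf end_cpu_access: 1 -> 0, unmapped */
	assert(b.vaddr == NULL);
	return 0;
}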
@@ -675,7 +687,8 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
 
 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
 {
-	return NULL;
+	struct ion_buffer *buffer = dmabuf->priv;
+	return buffer->vaddr + offset;
 }
 
 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
@@ -684,26 +697,49 @@ static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
 	return;
 }
 
-static void *ion_dma_buf_kmap_atomic(struct dma_buf *dmabuf,
-				     unsigned long offset)
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+					size_t len,
+					enum dma_data_direction direction)
 {
-	return NULL;
+	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+
+	if (!buffer->heap->ops->map_kernel) {
+		pr_err("%s: map kernel is not implemented by this heap.\n",
+		       __func__);
+		return -ENODEV;
+	}
+	mutex_lock(&buffer->lock);
+	vaddr = ion_buffer_kmap_get(buffer);
+	mutex_unlock(&buffer->lock);
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
+	if (!vaddr)
+		return -ENOMEM;
+	return 0;
 }
 
-static void ion_dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
-				      unsigned long offset, void *ptr)
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+				       size_t len,
+				       enum dma_data_direction direction)
 {
-	return;
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	ion_buffer_kmap_put(buffer);
+	mutex_unlock(&buffer->lock);
 }
 
 struct dma_buf_ops dma_buf_ops = {
 	.map_dma_buf = ion_map_dma_buf,
 	.unmap_dma_buf = ion_unmap_dma_buf,
 	.mmap = ion_mmap,
 	.release = ion_dma_buf_release,
-	.kmap_atomic = ion_dma_buf_kmap_atomic,
-	.kunmap_atomic = ion_dma_buf_kunmap_atomic,
+	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
+	.end_cpu_access = ion_dma_buf_end_cpu_access,
+	.kmap_atomic = ion_dma_buf_kmap,
+	.kunmap_atomic = ion_dma_buf_kunmap,
 	.kmap = ion_dma_buf_kmap,
 	.kunmap = ion_dma_buf_kunmap,
 };
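
begin_cpu_access fails with -ENODEV when a heap implements no map_kernel op, so heaps must provide the map_kernel/unmap_kernel pair for the new CPU-access path to work. A hypothetical heap could back the pair with vmap/vunmap along these lines (the struct page * array in buffer->priv_virt is an assumption made for illustration; real heaps track their pages differently):

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"	/* struct ion_heap, struct ion_buffer */

/* Signatures match the calls the patch makes:
 * buffer->heap->ops->map_kernel(buffer->heap, buffer). */
static void *example_heap_map_kernel(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
	/* Assumption: this example heap stashed its page array in
	 * buffer->priv_virt at allocation time. */
	struct page **pages = buffer->priv_virt;

	return vmap(pages, buffer->size / PAGE_SIZE, VM_MAP, PAGE_KERNEL);
}

static void example_heap_unmap_kernel(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

Note that the ops table reuses ion_dma_buf_kmap/ion_dma_buf_kunmap for the kmap_atomic/kunmap_atomic slots: once begin_cpu_access has populated buffer->vaddr, kmap never sleeps, so the same helpers serve both contexts.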