Commit 03189d5b authored by Chris Wilson

drm: Remove defunct dma_buf_kmap stubs

Since commit 09ea0dfb ("dma-buf: make map_atomic and map function
pointers optional"), we no longer need to provide stub no-op functions
as the core now provides them directly.

References: 09ea0dfb ("dma-buf: make map_atomic and map function pointers optional")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: "Christian König" <christian.koenig@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180807174748.4503-1-chris@chris-wilson.co.uk
parent 9e37ee79
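
For context, the referenced dma-buf change makes both callbacks optional in the core, so an exporter that leaves .map/.unmap unset gets the same behaviour the stubs provided. A minimal sketch of that core fallback (assuming the post-09ea0dfb behaviour of dma_buf_kmap()/dma_buf_kunmap(); not quoted verbatim from drivers/dma-buf/dma-buf.c):

void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	/* With .map optional, the core returns NULL itself, which is all
	 * the drm_gem_dmabuf_kmap() stub ever did. */
	if (!dmabuf->ops->map)
		return NULL;

	return dmabuf->ops->map(dmabuf, page_num);
}

void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	/* Likewise, a missing .unmap is a no-op, matching the removed
	 * drm_gem_dmabuf_kunmap() stub. */
	if (dmabuf->ops->unmap)
		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
}

With that fallback in the core, exporters such as amdgpu_dmabuf_ops and drm_gem_prime_dmabuf_ops below can drop their .map/.unmap entries entirely, which is what this patch does.
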
@@ -339,8 +339,6 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = {
 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
 	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
-	.map = drm_gem_dmabuf_kmap,
-	.unmap = drm_gem_dmabuf_kunmap,
 	.mmap = drm_gem_dmabuf_mmap,
 	.vmap = drm_gem_dmabuf_vmap,
 	.vunmap = drm_gem_dmabuf_vunmap,
@@ -433,34 +433,6 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 
-/**
- * drm_gem_dmabuf_kmap - map implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map callback.
- */
-void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
-	return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap);
-
-/**
- * drm_gem_dmabuf_kunmap - unmap implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
- */
-void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
-			   void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap);
-
 /**
  * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
  * @dma_buf: buffer to be mapped
@@ -489,8 +461,6 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
 	.map_dma_buf = drm_gem_map_dma_buf,
 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
-	.map = drm_gem_dmabuf_kmap,
-	.unmap = drm_gem_dmabuf_kunmap,
 	.mmap = drm_gem_dmabuf_mmap,
 	.vmap = drm_gem_dmabuf_vmap,
 	.vunmap = drm_gem_dmabuf_vunmap,
@@ -93,9 +93,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 			   enum dma_data_direction dir);
 void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
 void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
-void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
-			   void *addr);
 int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
 int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,