Commit 465ed660 authored by Prathyush K, committed by Inki Dae

drm/exynos: remove 'pages' and 'page_size' elements in exynos gem buffer

Changelog v2:

Removed the redundant check for an invalid sgl.
Added a check for a valid page_offset at the beginning of exynos_drm_gem_map_buf.

Changelog v1:

The 'pages' array is not required since we can use the 'sgt'. Even for
CONTIG buffers an SGT is created (it contains just one sgl), and this SGT
can be used during mmap instead of 'pages'. The 'page_size' element of the
structure is not used anywhere either, so it is removed.
This patch also fixes a memory leak: the 'pages' array was allocated during
gem buffer allocation but never freed during deallocation.
Signed-off-by: Prathyush K <prathyush.k@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
parent dd265850
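The idea behind dropping 'pages' is that even a physically contiguous allocation can be described by an sg_table with a single entry, so one SGT-based path serves both CONTIG and NONCONTIG buffers. A minimal sketch of that representation (illustrative only; the helper name is invented and this is not the driver's exact allocation code):

#include <linux/gfp.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: wrap one physically contiguous region in a
 * single-entry sg_table, mirroring why 'pages' became redundant. */
static int contig_region_to_sgt(struct sg_table *sgt,
                                struct page *first_page, unsigned int size)
{
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);       /* exactly one nent */
        if (ret)
                return ret;

        /* The lone sgl spans the whole region at offset 0, so the same
         * scatterlist walk used for NONCONTIG buffers resolves it too. */
        sg_set_page(sgt->sgl, first_page, size, 0);
        return 0;
}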
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -34,8 +34,6 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
 	int ret = 0;
-	unsigned int npages, i = 0;
-	struct scatterlist *sgl;
 	enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -73,22 +71,6 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 		goto err_free_sgt;
 	}
 
-	npages = buf->sgt->nents;
-
-	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
-	if (!buf->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
-		ret = -ENOMEM;
-		goto err_free_table;
-	}
-
-	sgl = buf->sgt->sgl;
-	while (i < npages) {
-		buf->pages[i] = sg_page(sgl);
-		sgl = sg_next(sgl);
-		i++;
-	}
-
 	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
 			(unsigned long)buf->kvaddr,
 			(unsigned long)buf->dma_addr,
@@ -96,8 +78,6 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 
 	return ret;
 
-err_free_table:
-	sg_free_table(buf->sgt);
 err_free_sgt:
 	kfree(buf->sgt);
 	buf->sgt = NULL;
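The dropped err_free_table label follows the kernel's goto-unwind convention: each label releases exactly what was acquired before the failing step, so once the 'pages' allocation is gone its unwind stop goes with it. A self-contained illustration of the pattern (user-space, with invented function names):

#include <stdio.h>

/* Stand-ins for the driver's allocation steps (dma_alloc_attrs,
 * sg_alloc_table, ...); acquire_b fails to exercise the unwind. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return -1; }
static void release_a(void) { puts("release_a"); }

static int setup_two_resources(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                goto err_out;           /* nothing acquired yet */

        ret = acquire_b();
        if (ret)
                goto err_release_a;     /* unwind only what succeeded */

        return 0;

err_release_a:
        release_a();
err_out:
        return ret;
}

int main(void)
{
        return setup_two_resources() ? 1 : 0;
}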
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
 void exynos_drm_fini_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buffer);
 
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
 int exynos_drm_alloc_buf(struct drm_device *dev,
 			struct exynos_drm_gem_buf *buf,
 			unsigned int flags);
 
-/* release physical memory region, sgt and pages. */
+/* release physical memory region, and sgt. */
 void exynos_drm_free_buf(struct drm_device *dev,
 			unsigned int flags,
 			struct exynos_drm_gem_buf *buffer);
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -87,8 +87,7 @@ static struct sg_table *
 		goto err_unlock;
 	}
 
-	DRM_DEBUG_PRIME("buffer size = 0x%lx page_size = 0x%lx\n",
-			buf->size, buf->page_size);
+	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
 
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -99,34 +99,23 @@ static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
 	unsigned long pfn;
 	int i;
 
-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		if (!buf->sgt)
-			return -EINTR;
-
-		sgl = buf->sgt->sgl;
-		for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
-			if (!sgl) {
-				DRM_ERROR("invalid SG table\n");
-				return -EINTR;
-			}
-			if (page_offset < (sgl->length >> PAGE_SHIFT))
-				break;
-			page_offset -= (sgl->length >> PAGE_SHIFT);
-		}
-
-		if (i >= buf->sgt->nents) {
-			DRM_ERROR("invalid page offset\n");
-			return -EINVAL;
-		}
-
-		pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
-	} else {
-		if (!buf->pages)
-			return -EINTR;
-
-		pfn = page_to_pfn(buf->pages[0]) + page_offset;
+	if (!buf->sgt)
+		return -EINTR;
+
+	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+		DRM_ERROR("invalid page offset\n");
+		return -EINVAL;
+	}
+
+	sgl = buf->sgt->sgl;
+	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+		if (page_offset < (sgl->length >> PAGE_SHIFT))
+			break;
+		page_offset -= (sgl->length >> PAGE_SHIFT);
 	}
 
+	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+
 	return vm_insert_mixed(vma, f_vaddr, pfn);
 }
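With the branch on EXYNOS_BO_NONCONTIG gone, every fault takes the same path: validate page_offset against the buffer size, then walk the scatterlist until the segment containing the faulting page is found, with the remainder becoming the offset inside that segment. A small user-space model of the same arithmetic (simplified; struct seg stands in for a scatterlist entry):

#include <stdio.h>
#include <stddef.h>

struct seg { unsigned long pages; };    /* models sgl->length >> PAGE_SHIFT */

/* Find which segment holds page_offset, like the for_each_sg() walk. */
static int find_segment(const struct seg *segs, size_t nents,
                        unsigned long total_pages, unsigned long page_offset,
                        size_t *index, unsigned long *residual)
{
        size_t i;

        if (page_offset >= total_pages)         /* the new up-front check */
                return -1;

        for (i = 0; i < nents; i++) {
                if (page_offset < segs[i].pages)
                        break;
                page_offset -= segs[i].pages;   /* skip this segment */
        }

        *index = i;
        *residual = page_offset;
        return 0;
}

int main(void)
{
        struct seg segs[] = { { 4 }, { 2 }, { 8 } };    /* 14 pages total */
        size_t idx;
        unsigned long off;

        /* Page 9 lands in segment 2 at offset 3 (9 - 4 - 2). */
        if (!find_segment(segs, 3, 14, 9, &idx, &off))
                printf("page 9 -> segment %zu, offset %lu\n", idx, off);
        return 0;
}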
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -41,8 +41,6 @@
  *	device address with IOMMU.
  * @write: whether pages will be written to by the caller.
  * @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
  * @size: size of allocated memory region.
  * @pfnmap: indicate whether memory region from userptr is mmaped with
  *	VM_PFNMAP or not.
@@ -54,8 +52,6 @@ struct exynos_drm_gem_buf {
 	struct dma_attrs dma_attrs;
 	unsigned int write;
 	struct sg_table *sgt;
-	struct page **pages;
-	unsigned long page_size;
 	unsigned long size;
 	bool pfnmap;
 };