Commit 8139951c authored by Joonyoung Shim, committed by Inki Dae

drm/exynos: stop using sgtable in page fault handler

struct exynos_drm_gem_buf already holds the buffer's pages when the buffer is
created, so the page fault handler can use those pages directly; we no longer
need to build an sg table for the buffer. This does, however, require
constructing the pages array for buffers imported through dma-buf PRIME.
Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
parent 2b8376c8
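
Before the diff, here is a minimal user-space sketch (not kernel code) of the lookup this commit switches to: translating a fault's page offset by walking variable-length scatterlist segments versus indexing a flat pages array filled when the buffer was created or imported. The struct segment type, the helper names, and the demo pfn values are made up for illustration; only the indexing idea mirrors the patch.

/*
 * Model of the change: old path walks segments (like sg entries) on every
 * fault; new path indexes a precomputed per-page array in O(1).
 */
#include <stdio.h>
#include <stddef.h>

struct segment {                  /* stands in for one scatterlist entry */
        unsigned long first_pfn;  /* first page frame of the segment */
        size_t nr_pages;          /* segment length in pages */
};

/* Old-style lookup: O(number of segments) per page fault. */
static unsigned long pfn_from_segments(const struct segment *seg, size_t nsegs,
                                       size_t page_offset)
{
        size_t i;

        for (i = 0; i < nsegs; i++) {
                if (page_offset < seg[i].nr_pages)
                        return seg[i].first_pfn + page_offset;
                page_offset -= seg[i].nr_pages;
        }
        return 0;       /* out of range */
}

/* New-style lookup: O(1), assuming the per-page array was filled up front. */
static unsigned long pfn_from_pages(const unsigned long *page_pfns,
                                    size_t page_offset)
{
        return page_pfns[page_offset];
}

int main(void)
{
        /* Two segments: pfns 100..103 and 500..501 (hypothetical values). */
        struct segment segs[] = { { 100, 4 }, { 500, 2 } };
        unsigned long pfns[] = { 100, 101, 102, 103, 500, 501 };

        printf("old walk : %lu\n", pfn_from_segments(segs, 2, 5)); /* 501 */
        printf("new index: %lu\n", pfn_from_pages(pfns, 5));       /* 501 */
        return 0;
}

In the patch itself, exynos_drm_gem_map_buf() becomes pfn = page_to_pfn(buf->pages[page_offset]), and the import path fills buffer->pages via drm_prime_sg_to_page_addr_arrays() so the same indexing works for PRIME-imported buffers.
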
@@ -90,23 +90,12 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
                 }
         }

-        buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
-        if (IS_ERR(buf->sgt)) {
-                DRM_ERROR("failed to get sg table.\n");
-                ret = PTR_ERR(buf->sgt);
-                goto err_free_attrs;
-        }
-
         DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                         (unsigned long)buf->dma_addr,
                         buf->size);

         return ret;

-err_free_attrs:
-        dma_free_attrs(dev->dev, buf->size, buf->pages,
-                        (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-        buf->dma_addr = (dma_addr_t)NULL;
-
 err_free:
         if (!is_drm_iommu_supported(dev))
                 drm_free_large(buf->pages);
@@ -126,11 +115,6 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
                         (unsigned long)buf->dma_addr,
                         buf->size);

-        sg_free_table(buf->sgt);
-
-        kfree(buf->sgt);
-        buf->sgt = NULL;
-
         if (!is_drm_iommu_supported(dev)) {
                 dma_free_attrs(dev->dev, buf->size, buf->cookie,
                                 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
...
@@ -203,6 +203,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
         struct scatterlist *sgl;
         struct exynos_drm_gem_obj *exynos_gem_obj;
         struct exynos_drm_gem_buf *buffer;
+        int npages;
         int ret;

         /* is this one of own objects? */
@@ -251,6 +252,20 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
         buffer->size = dma_buf->size;
         buffer->dma_addr = sg_dma_address(sgl);

+        npages = dma_buf->size >> PAGE_SHIFT;
+        buffer->pages = drm_malloc_ab(npages, sizeof(struct page *));
+        if (!buffer->pages) {
+                ret = -ENOMEM;
+                goto err_free_gem;
+        }
+
+        ret = drm_prime_sg_to_page_addr_arrays(sgt, buffer->pages, NULL,
+                        npages);
+        if (ret < 0) {
+                drm_free_large(buffer->pages);
+                goto err_free_gem;
+        }
+
         if (sgt->nents == 1) {
                 /* always physically continuous memory if sgt->nents is 1. */
                 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
@@ -273,6 +288,9 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,

         return &exynos_gem_obj->base;

+err_free_gem:
+        drm_gem_object_release(&exynos_gem_obj->base);
+        kfree(exynos_gem_obj);
 err_free_buffer:
         kfree(buffer);
         buffer = NULL;
...
@@ -83,26 +83,14 @@ static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
 {
         struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
         struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-        struct scatterlist *sgl;
         unsigned long pfn;
-        int i;
-
-        if (!buf->sgt)
-                return -EINTR;

         if (page_offset >= (buf->size >> PAGE_SHIFT)) {
                 DRM_ERROR("invalid page offset\n");
                 return -EINVAL;
         }

-        sgl = buf->sgt->sgl;
-        for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
-                if (page_offset < (sgl->length >> PAGE_SHIFT))
-                        break;
-                page_offset -= (sgl->length >> PAGE_SHIFT);
-        }
-
-        pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+        pfn = page_to_pfn(buf->pages[page_offset]);

         return vm_insert_mixed(vma, f_vaddr, pfn);
 }
...