Commit c67e6279 authored by Christian König

drm/prime: split array import functions v4

Mapping the imported pages of a DMA-buf into an userspace process
doesn't work as expected.

But we have recurring requests on this approach, so split the
functions for this and document that dma_buf_mmap() needs to be used
instead.

v2: split it into two functions
v3: rebased on latest changes
v4: update commit message a bit
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/403838/
parent 18f7608a
...@@ -918,7 +918,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, ...@@ -918,7 +918,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
goto release_sg; goto release_sg;
/* convert SG to linear array of pages and dma addresses */ /* convert SG to linear array of pages and dma addresses */
drm_prime_sg_to_page_addr_arrays(ttm->sg, NULL, gtt->ttm.dma_address, drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
ttm->num_pages); ttm->num_pages);
return 0; return 0;
...@@ -1264,8 +1264,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev, ...@@ -1264,8 +1264,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
ttm->sg = sgt; ttm->sg = sgt;
} }
drm_prime_sg_to_page_addr_arrays(ttm->sg, NULL, drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
gtt->ttm.dma_address,
ttm->num_pages); ttm->num_pages);
return 0; return 0;
} }
......
...@@ -978,44 +978,58 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, ...@@ -978,44 +978,58 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_prime_import); EXPORT_SYMBOL(drm_gem_prime_import);
/** /**
* drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array * drm_prime_sg_to_page_array - convert an sg table into a page array
* @sgt: scatter-gather table to convert * @sgt: scatter-gather table to convert
* @pages: optional array of page pointers to store the page array in * @pages: array of page pointers to store the pages in
* @addrs: optional array to store the dma bus address of each page * @max_entries: size of the passed-in array
* @max_entries: size of both the passed-in arrays
* *
* Exports an sg table into an array of pages and addresses. This is currently * Exports an sg table into an array of pages.
* required by the TTM driver in order to do correct fault handling.
* *
* Drivers can use this in their &drm_driver.gem_prime_import_sg_table * This function is deprecated and strongly discouraged to be used.
* implementation. * The page array is only useful for page faults and those can corrupt fields
* in the struct page if they are not handled by the exporting driver.
*/ */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
dma_addr_t *addrs, int max_entries) struct page **pages,
int max_entries)
{ {
struct sg_dma_page_iter dma_iter;
struct sg_page_iter page_iter; struct sg_page_iter page_iter;
struct page **p = pages; struct page **p = pages;
dma_addr_t *a = addrs;
if (pages) {
for_each_sgtable_page(sgt, &page_iter, 0) { for_each_sgtable_page(sgt, &page_iter, 0) {
if (WARN_ON(p - pages >= max_entries)) if (WARN_ON(p - pages >= max_entries))
return -1; return -1;
*p++ = sg_page_iter_page(&page_iter); *p++ = sg_page_iter_page(&page_iter);
} }
} return 0;
if (addrs) { }
EXPORT_SYMBOL(drm_prime_sg_to_page_array);
/**
* drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
* @sgt: scatter-gather table to convert
* @addrs: array to store the dma bus address of each page
* @max_entries: size of both the passed-in arrays
*
* Exports an sg table into an array of addresses.
*
* Drivers should use this in their &drm_driver.gem_prime_import_sg_table
* implementation.
*/
int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
int max_entries)
{
struct sg_dma_page_iter dma_iter;
dma_addr_t *a = addrs;
for_each_sgtable_dma_page(sgt, &dma_iter, 0) { for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
if (WARN_ON(a - addrs >= max_entries)) if (WARN_ON(a - addrs >= max_entries))
return -1; return -1;
*a++ = sg_page_iter_dma_address(&dma_iter); *a++ = sg_page_iter_dma_address(&dma_iter);
} }
}
return 0; return 0;
} }
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays); EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
/** /**
* drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
......
...@@ -135,8 +135,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, ...@@ -135,8 +135,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
goto fail; goto fail;
} }
ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages, ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages);
NULL, npages);
if (ret) if (ret)
goto fail; goto fail;
......
...@@ -260,7 +260,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) ...@@ -260,7 +260,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
return -ENOMEM; return -ENOMEM;
} }
drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages); drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL)); pgprot_writecombine(PAGE_KERNEL));
......
...@@ -1180,7 +1180,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, ...@@ -1180,7 +1180,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
goto fail; goto fail;
} }
ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
if (ret) { if (ret) {
mutex_unlock(&msm_obj->lock); mutex_unlock(&msm_obj->lock);
goto fail; goto fail;
......
...@@ -1235,8 +1235,7 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev, ...@@ -1235,8 +1235,7 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
return 0; return 0;
if (slave && ttm->sg) { if (slave && ttm->sg) {
drm_prime_sg_to_page_addr_arrays(ttm->sg, NULL, drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
ttm_dma->dma_address,
ttm->num_pages); ttm->num_pages);
return 0; return 0;
} }
......
...@@ -1324,8 +1324,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, ...@@ -1324,8 +1324,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
} }
omap_obj->pages = pages; omap_obj->pages = pages;
ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, ret = drm_prime_sg_to_page_array(sgt, pages, npages);
npages);
if (ret) { if (ret) {
omap_gem_free_object(obj); omap_gem_free_object(obj);
obj = ERR_PTR(-ENOMEM); obj = ERR_PTR(-ENOMEM);
......
...@@ -395,7 +395,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt * ...@@ -395,7 +395,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
if (r) if (r)
goto release_sg; goto release_sg;
drm_prime_sg_to_page_addr_arrays(ttm->sg, NULL, gtt->ttm.dma_address, drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
ttm->num_pages); ttm->num_pages);
return 0; return 0;
...@@ -574,8 +574,7 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev, ...@@ -574,8 +574,7 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
} }
if (slave && ttm->sg) { if (slave && ttm->sg) {
drm_prime_sg_to_page_addr_arrays(ttm->sg, NULL, drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
gtt->ttm.dma_address,
ttm->num_pages); ttm->num_pages);
return 0; return 0;
} }
......
...@@ -356,8 +356,7 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev, ...@@ -356,8 +356,7 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
} }
obj->pages_pin_count++; /* perma-pinned */ obj->pages_pin_count++; /* perma-pinned */
drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL, drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
npages);
return &obj->base; return &obj->base;
} }
......
...@@ -220,8 +220,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, ...@@ -220,8 +220,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
xen_obj->sgt_imported = sgt; xen_obj->sgt_imported = sgt;
ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages, ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
NULL, xen_obj->num_pages); xen_obj->num_pages);
if (ret < 0) if (ret < 0)
return ERR_PTR(ret); return ERR_PTR(ret);
......
...@@ -105,8 +105,9 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, ...@@ -105,8 +105,9 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, int drm_prime_sg_to_page_array(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_pages); int max_pages);
int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
int max_pages);
#endif /* __DRM_PRIME_H__ */ #endif /* __DRM_PRIME_H__ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment