Commit bd43c9f0 authored by Thierry Reding

drm/tegra: gem: Map pages via the DMA API

When allocating pages, map them with the DMA API in order to invalidate
caches. This is the correct usage of the API and works just as well as
faking up the SG table and using the dma_sync_sg_for_device() function.
Signed-off-by: Thierry Reding <treding@nvidia.com>
parent 0281c414
...@@ -203,6 +203,8 @@ static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm, ...@@ -203,6 +203,8 @@ static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{ {
if (bo->pages) { if (bo->pages) {
dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
DMA_BIDIRECTIONAL);
drm_gem_put_pages(&bo->gem, bo->pages, true, true); drm_gem_put_pages(&bo->gem, bo->pages, true, true);
sg_free_table(bo->sgt); sg_free_table(bo->sgt);
kfree(bo->sgt); kfree(bo->sgt);
...@@ -213,8 +215,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) ...@@ -213,8 +215,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{ {
struct scatterlist *s; int err;
unsigned int i;
bo->pages = drm_gem_get_pages(&bo->gem); bo->pages = drm_gem_get_pages(&bo->gem);
if (IS_ERR(bo->pages)) if (IS_ERR(bo->pages))
...@@ -223,27 +224,26 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) ...@@ -223,27 +224,26 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
bo->num_pages = bo->gem.size >> PAGE_SHIFT; bo->num_pages = bo->gem.size >> PAGE_SHIFT;
bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
if (IS_ERR(bo->sgt)) if (IS_ERR(bo->sgt)) {
err = PTR_ERR(bo->sgt);
goto put_pages; goto put_pages;
}
/* err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
* Fake up the SG table so that dma_sync_sg_for_device() can be used DMA_BIDIRECTIONAL);
* to flush the pages associated with it. if (err == 0) {
* err = -EFAULT;
* TODO: Replace this by drm_clflash_sg() once it can be implemented goto free_sgt;
* without relying on symbols that are not exported. }
*/
for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
sg_dma_address(s) = sg_phys(s);
dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
DMA_TO_DEVICE);
return 0; return 0;
free_sgt:
sg_free_table(bo->sgt);
kfree(bo->sgt);
put_pages: put_pages:
drm_gem_put_pages(&bo->gem, bo->pages, false, false); drm_gem_put_pages(&bo->gem, bo->pages, false, false);
return PTR_ERR(bo->sgt); return err;
} }
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment