Commit 51ea742c authored by Christian König

drm/qxl: stop using TTM to call driver internal functions

The ttm_mem_io_* functions were intended to be internal to TTM and
shouldn't have been used in a driver. They were exported in commit
afe6804c just for QXL.

Instead, call the qxl_ttm_io_mem_reserve() function directly and drop
the free call entirely, since that is a dummy on QXL.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Gerd Hoffmann <kraxel@redhat.com>
Link: https://patchwork.freedesktop.org/patch/333289/
parent ef383218
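For reference, the free path being dropped is a no-op on QXL: the driver's io_mem_free callback in qxl_ttm.c is an empty stub, so only the reserve step ever did real work. A minimal sketch of that stub (assumed shape based on the "dummy" note above, not copied verbatim from the tree):

    /* QXL's io_mem_free hook does nothing, which is why the locked
     * ttm_mem_io_free() call in qxl_bo_kunmap_atomic_page() can simply
     * be dropped rather than replaced with a driver-internal call. */
    static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                    struct ttm_mem_reg *mem)
    {
    }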
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -355,6 +355,8 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
 /* qxl ttm */
 int qxl_ttm_init(struct qxl_device *qdev);
 void qxl_ttm_fini(struct qxl_device *qdev);
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+                           struct ttm_mem_reg *mem);
 
 /* qxl image */
...
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -167,7 +167,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
                               struct qxl_bo *bo, int page_offset)
 {
-       struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
        void *rptr;
        int ret;
        struct io_mapping *map;
@@ -179,9 +178,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
        else
                goto fallback;
 
-       (void) ttm_mem_io_lock(man, false);
-       ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
-       ttm_mem_io_unlock(man);
+       ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem);
 
        return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
 fallback:
@@ -212,17 +209,11 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
                                struct qxl_bo *bo, void *pmap)
 {
-       struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
-
        if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
            (bo->tbo.mem.mem_type != TTM_PL_PRIV))
                goto fallback;
 
        io_mapping_unmap_atomic(pmap);
-
-       (void) ttm_mem_io_lock(man, false);
-       ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
-       ttm_mem_io_unlock(man);
        return;
 fallback:
        qxl_bo_kunmap(bo);
...
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -110,7 +110,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
        *placement = qbo->placement;
 }
 
-static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
-                                 struct ttm_mem_reg *mem)
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+                          struct ttm_mem_reg *mem)
 {
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
...