Commit 79e2cf2e authored by Dmitry Osipenko

drm/gem: Take reservation lock for vmap/vunmap operations

The new common dma-buf locking convention will require buffer importers
to hold the reservation lock around mapping operations. Make the DRM GEM
core take the lock around vmapping operations and update DRM drivers to
use the locked functions where the DRM core now holds the lock.
This patch prepares the DRM core and drivers for the common dynamic
dma-buf locking convention.
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221017172229.42269-4-dmitry.osipenko@collabora.com
parent 19d6634d
...@@ -323,7 +323,7 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, ...@@ -323,7 +323,7 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer,
* fd_install step out of the driver backend hooks, to make that * fd_install step out of the driver backend hooks, to make that
* final step optional for internal users. * final step optional for internal users.
*/ */
ret = drm_gem_vmap(buffer->gem, map); ret = drm_gem_vmap_unlocked(buffer->gem, map);
if (ret) if (ret)
return ret; return ret;
...@@ -345,7 +345,7 @@ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) ...@@ -345,7 +345,7 @@ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{ {
struct iosys_map *map = &buffer->map; struct iosys_map *map = &buffer->map;
drm_gem_vunmap(buffer->gem, map); drm_gem_vunmap_unlocked(buffer->gem, map);
} }
EXPORT_SYMBOL(drm_client_buffer_vunmap); EXPORT_SYMBOL(drm_client_buffer_vunmap);
......
...@@ -1171,6 +1171,8 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) ...@@ -1171,6 +1171,8 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{ {
int ret; int ret;
dma_resv_assert_held(obj->resv);
if (!obj->funcs->vmap) if (!obj->funcs->vmap)
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -1186,6 +1188,8 @@ EXPORT_SYMBOL(drm_gem_vmap); ...@@ -1186,6 +1188,8 @@ EXPORT_SYMBOL(drm_gem_vmap);
void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{ {
dma_resv_assert_held(obj->resv);
if (iosys_map_is_null(map)) if (iosys_map_is_null(map))
return; return;
...@@ -1197,6 +1201,26 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) ...@@ -1197,6 +1201,26 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
} }
EXPORT_SYMBOL(drm_gem_vunmap); EXPORT_SYMBOL(drm_gem_vunmap);
/**
 * drm_gem_vmap_unlocked - take the reservation lock and vmap a GEM object
 * @obj: GEM object to map into kernel address space
 * @map: returns the resulting kernel virtual address mapping
 *
 * Convenience wrapper for callers that do not already hold @obj's dma-buf
 * reservation lock. Takes the lock for the duration of the call so that the
 * dma_resv_assert_held() check inside drm_gem_vmap() is satisfied, then
 * releases it before returning.
 *
 * Returns:
 * 0 on success, or a negative error code propagated from drm_gem_vmap().
 */
int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	/* NULL ww-acquire context: plain (non-deadlock-avoiding) lock. */
	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap_unlocked);
/**
 * drm_gem_vunmap_unlocked - take the reservation lock and vunmap a GEM object
 * @obj: GEM object to unmap
 * @map: kernel virtual address mapping previously set up by a vmap call
 *
 * Counterpart to drm_gem_vmap_unlocked() for callers that do not already hold
 * @obj's dma-buf reservation lock. Takes the lock around drm_gem_vunmap() —
 * which asserts the lock is held — and releases it before returning.
 */
void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	/* NULL ww-acquire context: plain (non-deadlock-avoiding) lock. */
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
/** /**
* drm_gem_lock_reservations - Sets up the ww context and acquires * drm_gem_lock_reservations - Sets up the ww context and acquires
* the lock on an array of GEM objects. * the lock on an array of GEM objects.
......
...@@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj) ...@@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
if (gem_obj->import_attach) { if (gem_obj->import_attach) {
if (dma_obj->vaddr) if (dma_obj->vaddr)
dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map); dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, dma_obj->sgt); drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
} else if (dma_obj->vaddr) { } else if (dma_obj->vaddr) {
if (dma_obj->map_noncoherent) if (dma_obj->map_noncoherent)
...@@ -581,7 +581,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev, ...@@ -581,7 +581,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
struct iosys_map map; struct iosys_map map;
int ret; int ret;
ret = dma_buf_vmap(attach->dmabuf, &map); ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
if (ret) { if (ret) {
DRM_ERROR("Failed to vmap PRIME buffer\n"); DRM_ERROR("Failed to vmap PRIME buffer\n");
return ERR_PTR(ret); return ERR_PTR(ret);
...@@ -589,7 +589,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev, ...@@ -589,7 +589,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt); obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) { if (IS_ERR(obj)) {
dma_buf_vunmap(attach->dmabuf, &map); dma_buf_vunmap_unlocked(attach->dmabuf, &map);
return obj; return obj;
} }
......
...@@ -354,7 +354,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map, ...@@ -354,7 +354,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
ret = -EINVAL; ret = -EINVAL;
goto err_drm_gem_vunmap; goto err_drm_gem_vunmap;
} }
ret = drm_gem_vmap(obj, &map[i]); ret = drm_gem_vmap_unlocked(obj, &map[i]);
if (ret) if (ret)
goto err_drm_gem_vunmap; goto err_drm_gem_vunmap;
} }
...@@ -376,7 +376,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map, ...@@ -376,7 +376,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
obj = drm_gem_fb_get_obj(fb, i); obj = drm_gem_fb_get_obj(fb, i);
if (!obj) if (!obj)
continue; continue;
drm_gem_vunmap(obj, &map[i]); drm_gem_vunmap_unlocked(obj, &map[i]);
} }
return ret; return ret;
} }
...@@ -403,7 +403,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map) ...@@ -403,7 +403,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
continue; continue;
if (iosys_map_is_null(&map[i])) if (iosys_map_is_null(&map[i]))
continue; continue;
drm_gem_vunmap(obj, &map[i]); drm_gem_vunmap_unlocked(obj, &map[i]);
} }
} }
EXPORT_SYMBOL(drm_gem_fb_vunmap); EXPORT_SYMBOL(drm_gem_fb_vunmap);
......
...@@ -64,13 +64,8 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem, ...@@ -64,13 +64,8 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
struct iosys_map *map) struct iosys_map *map)
{ {
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
int ret;
dma_resv_lock(gem->resv, NULL);
ret = ttm_bo_vmap(bo, map);
dma_resv_unlock(gem->resv);
return ret; return ttm_bo_vmap(bo, map);
} }
EXPORT_SYMBOL(drm_gem_ttm_vmap); EXPORT_SYMBOL(drm_gem_ttm_vmap);
...@@ -87,9 +82,7 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem, ...@@ -87,9 +82,7 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
{ {
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
dma_resv_lock(gem->resv, NULL);
ttm_bo_vunmap(bo, map); ttm_bo_vunmap(bo, map);
dma_resv_unlock(gem->resv);
} }
EXPORT_SYMBOL(drm_gem_ttm_vunmap); EXPORT_SYMBOL(drm_gem_ttm_vunmap);
......
...@@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) ...@@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
} else { } else {
buffer_chunk->size = lima_bo_size(bo); buffer_chunk->size = lima_bo_size(bo);
ret = drm_gem_shmem_vmap(&bo->base, &map); ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
if (ret) { if (ret) {
kvfree(et); kvfree(et);
goto out; goto out;
...@@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) ...@@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size); memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
drm_gem_shmem_vunmap(&bo->base, &map); drm_gem_vunmap_unlocked(&bo->base.base, &map);
} }
buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size; buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
......
...@@ -209,7 +209,7 @@ void panfrost_core_dump(struct panfrost_job *job) ...@@ -209,7 +209,7 @@ void panfrost_core_dump(struct panfrost_job *job)
goto dump_header; goto dump_header;
} }
ret = drm_gem_shmem_vmap(&bo->base, &map); ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
if (ret) { if (ret) {
dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n"); dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
iter.hdr->bomap.valid = 0; iter.hdr->bomap.valid = 0;
...@@ -236,7 +236,7 @@ void panfrost_core_dump(struct panfrost_job *job) ...@@ -236,7 +236,7 @@ void panfrost_core_dump(struct panfrost_job *job)
vaddr = map.vaddr; vaddr = map.vaddr;
memcpy(iter.data, vaddr, bo->base.base.size); memcpy(iter.data, vaddr, bo->base.base.size);
drm_gem_shmem_vunmap(&bo->base, &map); drm_gem_vunmap_unlocked(&bo->base.base, &map);
iter.hdr->bomap.valid = cpu_to_le32(1); iter.hdr->bomap.valid = cpu_to_le32(1);
......
...@@ -106,7 +106,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, ...@@ -106,7 +106,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_close_bo; goto err_close_bo;
} }
ret = drm_gem_shmem_vmap(bo, &map); ret = drm_gem_vmap_unlocked(&bo->base, &map);
if (ret) if (ret)
goto err_put_mapping; goto err_put_mapping;
perfcnt->buf = map.vaddr; perfcnt->buf = map.vaddr;
...@@ -165,7 +165,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, ...@@ -165,7 +165,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
return 0; return 0;
err_vunmap: err_vunmap:
drm_gem_shmem_vunmap(bo, &map); drm_gem_vunmap_unlocked(&bo->base, &map);
err_put_mapping: err_put_mapping:
panfrost_gem_mapping_put(perfcnt->mapping); panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo: err_close_bo:
...@@ -195,7 +195,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, ...@@ -195,7 +195,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL; perfcnt->user = NULL;
drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base, &map); drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map);
perfcnt->buf = NULL; perfcnt->buf = NULL;
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
......
...@@ -168,9 +168,16 @@ int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map) ...@@ -168,9 +168,16 @@ int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
bo->map_count++; bo->map_count++;
goto out; goto out;
} }
r = ttm_bo_vmap(&bo->tbo, &bo->map);
r = __qxl_bo_pin(bo);
if (r) if (r)
return r; return r;
r = ttm_bo_vmap(&bo->tbo, &bo->map);
if (r) {
__qxl_bo_unpin(bo);
return r;
}
bo->map_count = 1; bo->map_count = 1;
/* TODO: Remove kptr in favor of map everywhere. */ /* TODO: Remove kptr in favor of map everywhere. */
...@@ -192,12 +199,6 @@ int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map) ...@@ -192,12 +199,6 @@ int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map)
if (r) if (r)
return r; return r;
r = __qxl_bo_pin(bo);
if (r) {
qxl_bo_unreserve(bo);
return r;
}
r = qxl_bo_vmap_locked(bo, map); r = qxl_bo_vmap_locked(bo, map);
qxl_bo_unreserve(bo); qxl_bo_unreserve(bo);
return r; return r;
...@@ -247,6 +248,7 @@ void qxl_bo_vunmap_locked(struct qxl_bo *bo) ...@@ -247,6 +248,7 @@ void qxl_bo_vunmap_locked(struct qxl_bo *bo)
return; return;
bo->kptr = NULL; bo->kptr = NULL;
ttm_bo_vunmap(&bo->tbo, &bo->map); ttm_bo_vunmap(&bo->tbo, &bo->map);
__qxl_bo_unpin(bo);
} }
int qxl_bo_vunmap(struct qxl_bo *bo) int qxl_bo_vunmap(struct qxl_bo *bo)
...@@ -258,7 +260,6 @@ int qxl_bo_vunmap(struct qxl_bo *bo) ...@@ -258,7 +260,6 @@ int qxl_bo_vunmap(struct qxl_bo *bo)
return r; return r;
qxl_bo_vunmap_locked(bo); qxl_bo_vunmap_locked(bo);
__qxl_bo_unpin(bo);
qxl_bo_unreserve(bo); qxl_bo_unreserve(bo);
return 0; return 0;
} }
......
...@@ -59,7 +59,7 @@ int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) ...@@ -59,7 +59,7 @@ int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
struct qxl_bo *bo = gem_to_qxl_bo(obj); struct qxl_bo *bo = gem_to_qxl_bo(obj);
int ret; int ret;
ret = qxl_bo_vmap(bo, map); ret = qxl_bo_vmap_locked(bo, map);
if (ret < 0) if (ret < 0)
return ret; return ret;
...@@ -71,5 +71,5 @@ void qxl_gem_prime_vunmap(struct drm_gem_object *obj, ...@@ -71,5 +71,5 @@ void qxl_gem_prime_vunmap(struct drm_gem_object *obj,
{ {
struct qxl_bo *bo = gem_to_qxl_bo(obj); struct qxl_bo *bo = gem_to_qxl_bo(obj);
qxl_bo_vunmap(bo); qxl_bo_vunmap_locked(bo);
} }
...@@ -408,6 +408,9 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj); ...@@ -408,6 +408,9 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed); bool dirty, bool accessed);
int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out); int count, struct drm_gem_object ***objs_out);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment