Commit 3f6a1e22 authored by Dmitry Osipenko

drm/shmem-helper: Switch to use drm_* debug helpers

Ease debugging of a multi-GPU system by using drm_WARN_*() and
drm_dbg_kms() helpers that print out DRM device name corresponding
to shmem GEM.
Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
Suggested-by: Thomas Zimmermann <tzimmermann@suse.de>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://lore.kernel.org/all/20230108210445.3948344-6-dmitry.osipenko@collabora.com/
parent 3842d671
...@@ -141,7 +141,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) ...@@ -141,7 +141,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
WARN_ON(shmem->vmap_use_count); drm_WARN_ON(obj->dev, shmem->vmap_use_count);
if (obj->import_attach) { if (obj->import_attach) {
drm_prime_gem_destroy(obj, shmem->sgt); drm_prime_gem_destroy(obj, shmem->sgt);
...@@ -156,7 +156,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) ...@@ -156,7 +156,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
drm_gem_shmem_put_pages(shmem); drm_gem_shmem_put_pages(shmem);
} }
WARN_ON(shmem->pages_use_count); drm_WARN_ON(obj->dev, shmem->pages_use_count);
drm_gem_object_release(obj); drm_gem_object_release(obj);
mutex_destroy(&shmem->pages_lock); mutex_destroy(&shmem->pages_lock);
...@@ -175,7 +175,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) ...@@ -175,7 +175,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
pages = drm_gem_get_pages(obj); pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) { if (IS_ERR(pages)) {
DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages)); drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
PTR_ERR(pages));
shmem->pages_use_count = 0; shmem->pages_use_count = 0;
return PTR_ERR(pages); return PTR_ERR(pages);
} }
...@@ -207,9 +208,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) ...@@ -207,9 +208,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
*/ */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{ {
struct drm_gem_object *obj = &shmem->base;
int ret; int ret;
WARN_ON(shmem->base.import_attach); drm_WARN_ON(obj->dev, obj->import_attach);
ret = mutex_lock_interruptible(&shmem->pages_lock); ret = mutex_lock_interruptible(&shmem->pages_lock);
if (ret) if (ret)
...@@ -225,7 +227,7 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) ...@@ -225,7 +227,7 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
if (WARN_ON_ONCE(!shmem->pages_use_count)) if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
return; return;
if (--shmem->pages_use_count > 0) if (--shmem->pages_use_count > 0)
...@@ -268,7 +270,9 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages); ...@@ -268,7 +270,9 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
*/ */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{ {
WARN_ON(shmem->base.import_attach); struct drm_gem_object *obj = &shmem->base;
drm_WARN_ON(obj->dev, obj->import_attach);
return drm_gem_shmem_get_pages(shmem); return drm_gem_shmem_get_pages(shmem);
} }
...@@ -283,7 +287,9 @@ EXPORT_SYMBOL(drm_gem_shmem_pin); ...@@ -283,7 +287,9 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
*/ */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{ {
WARN_ON(shmem->base.import_attach); struct drm_gem_object *obj = &shmem->base;
drm_WARN_ON(obj->dev, obj->import_attach);
drm_gem_shmem_put_pages(shmem); drm_gem_shmem_put_pages(shmem);
} }
...@@ -303,7 +309,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, ...@@ -303,7 +309,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
if (obj->import_attach) { if (obj->import_attach) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, map); ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) { if (!ret) {
if (WARN_ON(map->is_iomem)) { if (drm_WARN_ON(obj->dev, map->is_iomem)) {
dma_buf_vunmap(obj->import_attach->dmabuf, map); dma_buf_vunmap(obj->import_attach->dmabuf, map);
ret = -EIO; ret = -EIO;
goto err_put_pages; goto err_put_pages;
...@@ -328,7 +334,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, ...@@ -328,7 +334,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
} }
if (ret) { if (ret) {
DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret); drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
goto err_put_pages; goto err_put_pages;
} }
...@@ -378,7 +384,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, ...@@ -378,7 +384,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
if (WARN_ON_ONCE(!shmem->vmap_use_count)) if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
return; return;
if (--shmem->vmap_use_count > 0) if (--shmem->vmap_use_count > 0)
...@@ -461,7 +467,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) ...@@ -461,7 +467,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
WARN_ON(!drm_gem_shmem_is_purgeable(shmem)); drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt); sg_free_table(shmem->sgt);
...@@ -550,7 +556,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) ...@@ -550,7 +556,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
mutex_lock(&shmem->pages_lock); mutex_lock(&shmem->pages_lock);
if (page_offset >= num_pages || if (page_offset >= num_pages ||
WARN_ON_ONCE(!shmem->pages) || drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
shmem->madv < 0) { shmem->madv < 0) {
ret = VM_FAULT_SIGBUS; ret = VM_FAULT_SIGBUS;
} else { } else {
...@@ -569,7 +575,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) ...@@ -569,7 +575,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
WARN_ON(shmem->base.import_attach); drm_WARN_ON(obj->dev, obj->import_attach);
mutex_lock(&shmem->pages_lock); mutex_lock(&shmem->pages_lock);
...@@ -578,7 +584,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) ...@@ -578,7 +584,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new * mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()). * mm the vma is getting copied into (ie. on fork()).
*/ */
if (!WARN_ON_ONCE(!shmem->pages_use_count)) if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
shmem->pages_use_count++; shmem->pages_use_count++;
mutex_unlock(&shmem->pages_lock); mutex_unlock(&shmem->pages_lock);
...@@ -672,7 +678,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem) ...@@ -672,7 +678,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{ {
struct drm_gem_object *obj = &shmem->base; struct drm_gem_object *obj = &shmem->base;
WARN_ON(shmem->base.import_attach); drm_WARN_ON(obj->dev, obj->import_attach);
return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT); return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
} }
...@@ -687,7 +693,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_ ...@@ -687,7 +693,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
if (shmem->sgt) if (shmem->sgt)
return shmem->sgt; return shmem->sgt;
WARN_ON(obj->import_attach); drm_WARN_ON(obj->dev, obj->import_attach);
ret = drm_gem_shmem_get_pages_locked(shmem); ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret) if (ret)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment