Commit 418aa0c2 authored by Christian König's avatar Christian König Committed by Alex Deucher

drm/amdgpu: cleanup gem init/finit

Remove the double housekeeping and use something sane to
forcefully delete BOs on unload.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 7ea23565
...@@ -607,13 +607,7 @@ struct amdgpu_sa_bo { ...@@ -607,13 +607,7 @@ struct amdgpu_sa_bo {
/* /*
* GEM objects. * GEM objects.
*/ */
struct amdgpu_gem { void amdgpu_gem_force_release(struct amdgpu_device *adev);
struct mutex mutex;
struct list_head objects;
};
int amdgpu_gem_init(struct amdgpu_device *adev);
void amdgpu_gem_fini(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain, int alignment, u32 initial_domain,
u64 flags, bool kernel, u64 flags, bool kernel,
...@@ -2012,7 +2006,6 @@ struct amdgpu_device { ...@@ -2012,7 +2006,6 @@ struct amdgpu_device {
/* memory management */ /* memory management */
struct amdgpu_mman mman; struct amdgpu_mman mman;
struct amdgpu_gem gem;
struct amdgpu_vram_scratch vram_scratch; struct amdgpu_vram_scratch vram_scratch;
struct amdgpu_wb wb; struct amdgpu_wb wb;
atomic64_t vram_usage; atomic64_t vram_usage;
......
...@@ -1426,7 +1426,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, ...@@ -1426,7 +1426,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* can recall function without having locking issues */ * can recall function without having locking issues */
mutex_init(&adev->vm_manager.lock); mutex_init(&adev->vm_manager.lock);
atomic_set(&adev->irq.ih.lock, 0); atomic_set(&adev->irq.ih.lock, 0);
mutex_init(&adev->gem.mutex);
mutex_init(&adev->pm.mutex); mutex_init(&adev->pm.mutex);
mutex_init(&adev->gfx.gpu_clock_mutex); mutex_init(&adev->gfx.gpu_clock_mutex);
mutex_init(&adev->srbm_mutex); mutex_init(&adev->srbm_mutex);
......
...@@ -84,22 +84,31 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, ...@@ -84,22 +84,31 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
} }
*obj = &robj->gem_base; *obj = &robj->gem_base;
mutex_lock(&adev->gem.mutex);
list_add_tail(&robj->list, &adev->gem.objects);
mutex_unlock(&adev->gem.mutex);
return 0; return 0;
} }
int amdgpu_gem_init(struct amdgpu_device *adev) void amdgpu_gem_force_release(struct amdgpu_device *adev)
{ {
INIT_LIST_HEAD(&adev->gem.objects); struct drm_device *ddev = adev->ddev;
return 0; struct drm_file *file;
}
void amdgpu_gem_fini(struct amdgpu_device *adev) mutex_lock(&ddev->struct_mutex);
{
amdgpu_bo_force_delete(adev); list_for_each_entry(file, &ddev->filelist, lhead) {
struct drm_gem_object *gobj;
int handle;
WARN_ONCE(1, "Still active user space clients!\n");
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, handle) {
WARN_ONCE(1, "And also active allocations!\n");
drm_gem_object_unreference(gobj);
}
idr_destroy(&file->object_idr);
spin_unlock(&file->table_lock);
}
mutex_unlock(&ddev->struct_mutex);
} }
/* /*
......
...@@ -97,9 +97,6 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) ...@@ -97,9 +97,6 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL); amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
mutex_lock(&bo->adev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->adev->gem.mutex);
drm_gem_object_release(&bo->gem_base); drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent); amdgpu_bo_unref(&bo->parent);
kfree(bo->metadata); kfree(bo->metadata);
...@@ -473,26 +470,6 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev) ...@@ -473,26 +470,6 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM); return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
} }
/*
 * amdgpu_bo_force_delete - force-free all BOs still tracked on adev->gem.objects
 *
 * @adev: amdgpu device whose per-device GEM object list is torn down
 *
 * Called on unload; if userspace still holds objects, logs an error for each
 * and drops the GEM reference to free them.
 * NOTE(review): this is the old-style teardown this commit removes in favor
 * of amdgpu_gem_force_release(); quoted here from the deletion hunk.
 */
void amdgpu_bo_force_delete(struct amdgpu_device *adev)
{
struct amdgpu_bo *bo, *n;
/* Nothing left on the device-global object list: clean unload. */
if (list_empty(&adev->gem.objects)) {
return;
}
dev_err(adev->dev, "Userspace still has active objects !\n");
/* _safe variant: each iteration unlinks the current entry via list_del_init(). */
list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
dev_err(adev->dev, "%p %p %lu %lu force free\n",
&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
*((unsigned long *)&bo->gem_base.refcount));
/* Unlink under gem.mutex so concurrent list users stay consistent. */
mutex_lock(&bo->adev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->adev->gem.mutex);
/* this should unref the ttm bo */
drm_gem_object_unreference_unlocked(&bo->gem_base);
}
}
int amdgpu_bo_init(struct amdgpu_device *adev) int amdgpu_bo_init(struct amdgpu_device *adev)
{ {
/* Add an MTRR for the VRAM */ /* Add an MTRR for the VRAM */
......
...@@ -149,7 +149,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, ...@@ -149,7 +149,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 *gpu_addr); u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo); int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev); int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
void amdgpu_bo_force_delete(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev); int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev); void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
......
...@@ -73,10 +73,6 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, ...@@ -73,10 +73,6 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
mutex_lock(&adev->gem.mutex);
list_add_tail(&bo->list, &adev->gem.objects);
mutex_unlock(&adev->gem.mutex);
return &bo->gem_base; return &bo->gem_base;
} }
......
...@@ -927,10 +927,6 @@ static int gmc_v7_0_sw_init(void *handle) ...@@ -927,10 +927,6 @@ static int gmc_v7_0_sw_init(void *handle)
int dma_bits; int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_gem_init(adev);
if (r)
return r;
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r) if (r)
return r; return r;
...@@ -1011,7 +1007,7 @@ static int gmc_v7_0_sw_fini(void *handle) ...@@ -1011,7 +1007,7 @@ static int gmc_v7_0_sw_fini(void *handle)
adev->vm_manager.enabled = false; adev->vm_manager.enabled = false;
} }
gmc_v7_0_gart_fini(adev); gmc_v7_0_gart_fini(adev);
amdgpu_gem_fini(adev); amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev); amdgpu_bo_fini(adev);
return 0; return 0;
......
...@@ -887,10 +887,6 @@ static int gmc_v8_0_sw_init(void *handle) ...@@ -887,10 +887,6 @@ static int gmc_v8_0_sw_init(void *handle)
int dma_bits; int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_gem_init(adev);
if (r)
return r;
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r) if (r)
return r; return r;
...@@ -971,7 +967,7 @@ static int gmc_v8_0_sw_fini(void *handle) ...@@ -971,7 +967,7 @@ static int gmc_v8_0_sw_fini(void *handle)
adev->vm_manager.enabled = false; adev->vm_manager.enabled = false;
} }
gmc_v8_0_gart_fini(adev); gmc_v8_0_gart_fini(adev);
amdgpu_gem_fini(adev); amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev); amdgpu_bo_fini(adev);
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment