Commit c0c2c3bf authored by Christian König, committed by Alex Deucher

drm/ttm: completely rework ttm_bo_delayed_delete

There is no guarantee that the next entry on the ddestroy list stays on
the list when we drop the locks.

Completely rework this mess by moving processed entries onto a temporary
list.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-and-Tested-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 09052863
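
The idea is easiest to see outside the kernel. The following is an illustrative userspace sketch (plain C with pthreads), not TTM code; the names item, pending, process_item and drain_pending are invented for the example, and the tiny list helpers only mimic the kernel's list.h. It shows the pattern the patch switches to: each entry is parked on a private 'removed' list while the lock is held, the blocking per-entry work is done with the lock dropped, and anything unfinished is spliced back onto the shared list at the end, with the return value reporting whether the shared list ended up empty.

/* Illustrative userspace sketch only -- not TTM code. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* Minimal kernel-style intrusive list, just enough for the example. */
struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add_tail(e, h);
}

/* Append everything on 'from' to the tail of 'to', leaving 'from' empty. */
static void list_splice_tail_init(struct list_head *from, struct list_head *to)
{
	while (!list_empty(from))
		list_move_tail(from->next, to);
}

/* Invented work item; 'pending' plays the role of bdev->ddestroy. */
struct item {
	struct list_head link;
	int payload;
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_head pending = { &pending, &pending };

/* Stand-in for the blocking per-entry work; true when fully dealt with. */
static bool process_item(struct item *it) { return it->payload >= 0; }

/* True when nothing is left on 'pending', like the reworked function's 'empty'. */
static bool drain_pending(void)
{
	struct list_head removed;
	bool empty;

	list_init(&removed);

	pthread_mutex_lock(&pending_lock);
	while (!list_empty(&pending)) {
		struct item *it = container_of(pending.next, struct item, link);

		/* Park the entry on the private list before dropping the lock. */
		list_move_tail(&it->link, &removed);
		pthread_mutex_unlock(&pending_lock);

		if (process_item(it)) {
			/* Finished: unlink it; 'removed' is private, so no lock needed. */
			list_del(&it->link);
			list_init(&it->link);
		}

		pthread_mutex_lock(&pending_lock);
	}

	/* Whatever could not be finished goes back for a later pass. */
	list_splice_tail_init(&removed, &pending);
	empty = list_empty(&pending);
	pthread_mutex_unlock(&pending_lock);

	return empty;
}

int main(void)
{
	struct item a = { .payload = 1 }, b = { .payload = -1 };

	list_add_tail(&a.link, &pending);
	list_add_tail(&b.link, &pending);

	/* 'a' is finished; 'b' is spliced back, so 'pending' is not empty. */
	return drain_pending() ? 0 : 1;
}

Because 'removed' is owned entirely by the draining thread, nothing that happens to the shared list while the lock is dropped can invalidate the iteration; that is what lets the reworked ttm_bo_delayed_delete() below take the sleeping reservation lock for each entry without worrying about the next ddestroy entry disappearing.
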
@@ -572,60 +572,37 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 /**
  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
  * encountered buffers.
  */
-static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
 	struct ttm_bo_global *glob = bdev->glob;
-	struct ttm_buffer_object *entry = NULL;
-	int ret = 0;
-
-	spin_lock(&glob->lru_lock);
-	if (list_empty(&bdev->ddestroy))
-		goto out_unlock;
-
-	entry = list_first_entry(&bdev->ddestroy,
-		struct ttm_buffer_object, ddestroy);
-	kref_get(&entry->list_kref);
-
-	for (;;) {
-		struct ttm_buffer_object *nentry = NULL;
-
-		if (entry->ddestroy.next != &bdev->ddestroy) {
-			nentry = list_first_entry(&entry->ddestroy,
-				struct ttm_buffer_object, ddestroy);
-			kref_get(&nentry->list_kref);
-		}
-
-		ret = reservation_object_trylock(entry->resv) ? 0 : -EBUSY;
-		if (remove_all && ret) {
-			spin_unlock(&glob->lru_lock);
-			ret = reservation_object_lock(entry->resv, NULL);
-			spin_lock(&glob->lru_lock);
-		}
-
-		if (!ret)
-			ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
-						  true);
-		else
-			spin_unlock(&glob->lru_lock);
-
-		kref_put(&entry->list_kref, ttm_bo_release_list);
-		entry = nentry;
-
-		if (ret || !entry)
-			goto out;
-
-		spin_lock(&glob->lru_lock);
-		if (list_empty(&entry->ddestroy))
-			break;
-	}
-
-out_unlock:
-	spin_unlock(&glob->lru_lock);
-out:
-	if (entry)
-		kref_put(&entry->list_kref, ttm_bo_release_list);
-	return ret;
+	struct list_head removed;
+	bool empty;
+
+	INIT_LIST_HEAD(&removed);
+
+	spin_lock(&glob->lru_lock);
+	while (!list_empty(&bdev->ddestroy)) {
+		struct ttm_buffer_object *bo;
+
+		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
+				      ddestroy);
+		kref_get(&bo->list_kref);
+		list_move_tail(&bo->ddestroy, &removed);
+		spin_unlock(&glob->lru_lock);
+
+		reservation_object_lock(bo->resv, NULL);
+
+		spin_lock(&glob->lru_lock);
+		ttm_bo_cleanup_refs(bo, false, !remove_all, true);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+		spin_lock(&glob->lru_lock);
+	}
+	list_splice_tail(&removed, &bdev->ddestroy);
+	empty = list_empty(&bdev->ddestroy);
+	spin_unlock(&glob->lru_lock);
+
+	return empty;
 }
 
 static void ttm_bo_delayed_workqueue(struct work_struct *work)
@@ -633,7 +610,7 @@ static void ttm_bo_delayed_workqueue(struct work_struct *work)
 	struct ttm_bo_device *bdev =
 	    container_of(work, struct ttm_bo_device, wq.work);
 
-	if (ttm_bo_delayed_delete(bdev, false)) {
+	if (!ttm_bo_delayed_delete(bdev, false)) {
 		schedule_delayed_work(&bdev->wq,
 				      ((HZ / 100) < 1) ? 1 : HZ / 100);
 	}
@@ -1573,13 +1550,10 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 
 	cancel_delayed_work_sync(&bdev->wq);
 
-	while (ttm_bo_delayed_delete(bdev, true))
-		;
-
-	spin_lock(&glob->lru_lock);
-	if (list_empty(&bdev->ddestroy))
+	if (ttm_bo_delayed_delete(bdev, true))
 		TTM_DEBUG("Delayed destroy list was clean\n");
 
+	spin_lock(&glob->lru_lock);
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
 		if (list_empty(&bdev->man[0].lru[0]))
 			TTM_DEBUG("Swap list %d was clean\n", i);