Commit c7523083 authored by Thomas Hellstrom

drm/ttm: Hide the implementation details of reservation

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
parent 2844ea3f
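
The change is mechanical for callers: code that used to reserve through ttm_bo_reserve_nolru() and drop the reservation by unlocking bo->resv->lock directly now goes through the __ttm_bo_reserve() / __ttm_bo_unreserve() wrappers, keeping the ww_mutex inside the reservation object a TTM-internal detail. A rough caller-side sketch, illustrative only and not part of this commit (error handling elided):

    /* Before: the caller open-codes the reservation object's ww_mutex. */
    ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
    if (ret == 0) {
            /* ... work on the buffer object ... */
            ww_mutex_unlock(&bo->resv->lock);
    }

    /* After: the same flow through the new wrappers. */
    ret = __ttm_bo_reserve(bo, false, true, false, 0);
    if (ret == 0) {
            /* ... work on the buffer object ... */
            __ttm_bo_unreserve(bo);
    }
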
@@ -349,7 +349,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 qxl_fence_add_release_locked(&qbo->fence, release->id);
 ttm_bo_add_to_lru(bo);
-ww_mutex_unlock(&bo->resv->lock);
+__ttm_bo_unreserve(bo);
 entry->reserved = false;
 }
 spin_unlock(&bdev->fence_lock);
@@ -412,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 int ret;
 spin_lock(&glob->lru_lock);
-ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+ret = __ttm_bo_reserve(bo, false, true, false, 0);
 spin_lock(&bdev->fence_lock);
 (void) ttm_bo_wait(bo, false, false, true);
@@ -443,7 +443,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 ttm_bo_add_to_lru(bo);
 }
-ww_mutex_unlock(&bo->resv->lock);
+__ttm_bo_unreserve(bo);
 }
 kref_get(&bo->list_kref);
@@ -494,7 +494,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 sync_obj = driver->sync_obj_ref(bo->sync_obj);
 spin_unlock(&bdev->fence_lock);
-ww_mutex_unlock(&bo->resv->lock);
+__ttm_bo_unreserve(bo);
 spin_unlock(&glob->lru_lock);
 ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -514,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 return ret;
 spin_lock(&glob->lru_lock);
-ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+ret = __ttm_bo_reserve(bo, false, true, false, 0);
 /*
 * We raced, and lost, someone else holds the reservation now,
@@ -532,7 +532,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 spin_unlock(&bdev->fence_lock);
 if (ret || unlikely(list_empty(&bo->ddestroy))) {
-ww_mutex_unlock(&bo->resv->lock);
+__ttm_bo_unreserve(bo);
 spin_unlock(&glob->lru_lock);
 return ret;
 }
@@ -577,10 +577,10 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 kref_get(&nentry->list_kref);
 }
-ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+ret = __ttm_bo_reserve(entry, false, true, false, 0);
 if (remove_all && ret) {
 spin_unlock(&glob->lru_lock);
-ret = ttm_bo_reserve_nolru(entry, false, false,
+ret = __ttm_bo_reserve(entry, false, false,
 false, 0);
 spin_lock(&glob->lru_lock);
 }
@@ -726,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 spin_lock(&glob->lru_lock);
 list_for_each_entry(bo, &man->lru, lru) {
-ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+ret = __ttm_bo_reserve(bo, false, true, false, 0);
 if (!ret)
 break;
 }
@@ -1630,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 spin_lock(&glob->lru_lock);
 list_for_each_entry(bo, &glob->swap_lru, swap) {
-ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+ret = __ttm_bo_reserve(bo, false, true, false, 0);
 if (!ret)
 break;
 }
@@ -1697,7 +1697,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 * already swapped buffer.
 */
-ww_mutex_unlock(&bo->resv->lock);
+__ttm_bo_unreserve(bo);
 kref_put(&bo->list_kref, ttm_bo_release_list);
 return ret;
 }
@@ -1731,10 +1731,10 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
 return -ERESTARTSYS;
 if (!ww_mutex_is_locked(&bo->resv->lock))
 goto out_unlock;
-ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
+ret = __ttm_bo_reserve(bo, true, false, false, NULL);
 if (unlikely(ret != 0))
 goto out_unlock;
-ww_mutex_unlock(&bo->resv->lock);
+__ttm_bo_unreserve(bo);
 out_unlock:
 mutex_unlock(&bo->wu_mutex);
@@ -46,7 +46,7 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 ttm_bo_add_to_lru(bo);
 entry->removed = false;
 }
-ww_mutex_unlock(&bo->resv->lock);
+__ttm_bo_unreserve(bo);
 }
 }
@@ -140,7 +140,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 if (entry->reserved)
 continue;
-ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
+ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
 ticket);
 if (ret == -EDEADLK) {
@@ -224,7 +224,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 entry->old_sync_obj = bo->sync_obj;
 bo->sync_obj = driver->sync_obj_ref(sync_obj);
 ttm_bo_add_to_lru(bo);
-ww_mutex_unlock(&bo->resv->lock);
+__ttm_bo_unreserve(bo);
 entry->reserved = false;
 }
 spin_unlock(&bdev->fence_lock);
@@ -788,7 +788,7 @@ extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
 extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
 /**
-* ttm_bo_reserve_nolru:
+* __ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
@@ -809,7 +809,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @use_ticket is set to true.
 */
-static inline int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
+static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
 bool interruptible,
 bool no_wait, bool use_ticket,
 struct ww_acquire_ctx *ticket)
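
The return-code contract documented above is unchanged by the rename; only the function name loses its _nolru spelling. As a hedged sketch of a ticketed caller, loosely modelled on the ttm_eu_reserve_buffers() hunk earlier in this diff (retry logic elided, not part of the commit):

    ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true, ticket);
    if (ret == -EDEADLK) {
            /* Back off: release the reservations already held and retry,
             * as ttm_eu_reserve_buffers() does. */
    } else if (ret == -EALREADY) {
            /* Only possible with use_ticket == true: @bo was already
             * reserved with this ticket. */
    }
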
@@ -888,8 +887,7 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
 WARN_ON(!atomic_read(&bo->kref.refcount));
-ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
-ticket);
+ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
 if (likely(ret == 0))
 ttm_bo_del_sub_from_lru(bo);
@@ -929,20 +928,14 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 }
 /**
-* ttm_bo_unreserve_ticket
+* __ttm_bo_unreserve
 * @bo: A pointer to a struct ttm_buffer_object.
-* @ticket: ww_acquire_ctx used for reserving
 *
-* Unreserve a previous reservation of @bo made with @ticket.
+* Unreserve a previous reservation of @bo where the buffer object is
+* already on lru lists.
 */
-static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
-struct ww_acquire_ctx *t)
+static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-spin_lock(&bo->glob->lru_lock);
-ttm_bo_add_to_lru(bo);
-spin_unlock(&bo->glob->lru_lock);
-}
 ww_mutex_unlock(&bo->resv->lock);
 }
@@ -955,7 +948,25 @@ static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
 */
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-ttm_bo_unreserve_ticket(bo, NULL);
+if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+spin_lock(&bo->glob->lru_lock);
+ttm_bo_add_to_lru(bo);
+spin_unlock(&bo->glob->lru_lock);
+}
+__ttm_bo_unreserve(bo);
 }
+/**
+* ttm_bo_unreserve_ticket
+* @bo: A pointer to a struct ttm_buffer_object.
+* @ticket: ww_acquire_ctx used for reserving
+*
+* Unreserve a previous reservation of @bo made with @ticket.
+*/
+static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
+struct ww_acquire_ctx *t)
+{
+ttm_bo_unreserve(bo);
+}
 /*
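
Taken together, the header changes leave a three-level unreserve path. The sketch below merely restates what the hunks above establish, with an assumed caller and no error handling:

    /* __ttm_bo_unreserve()      - just drops bo->resv->lock; the object is
     *                             assumed to be on the LRU lists already.
     * ttm_bo_unreserve()        - puts the object back on the LRU (unless
     *                             TTM_PL_FLAG_NO_EVICT) and then calls
     *                             __ttm_bo_unreserve().
     * ttm_bo_unreserve_ticket() - now simply forwards to ttm_bo_unreserve().
     */
    ret = ttm_bo_reserve(bo, true, false, false, NULL);
    if (likely(ret == 0)) {
            /* ... validate or fence the buffer ... */
            ttm_bo_unreserve(bo);
    }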