Commit dd7cfd64 authored by Maarten Lankhorst

drm/ttm: kill fence_lock

No users are left, kill it off! :D
Conversion to the reservation api is next on the list, after
that the functionality can be restored with rcu.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
parent 7040138f
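Every hunk below follows the same pattern: a wait on, or update of, bo->sync_obj that used to be bracketed by the device-global bdev->fence_lock now relies on the per-buffer reservation (ww_mutex) that callers already hold. A minimal before/after sketch of the wait path (illustrative only; wait_idle_old() and wait_idle_new() are hypothetical helpers, not functions from this patch):

        /* Before: bo->sync_obj was guarded by the device-global fence_lock,
         * shared by every buffer object on the device. */
        static int wait_idle_old(struct ttm_buffer_object *bo)
        {
                int ret;

                spin_lock(&bo->bdev->fence_lock);
                ret = ttm_bo_wait(bo, false, true, false);
                spin_unlock(&bo->bdev->fence_lock);
                return ret;
        }

        /* After: the per-bo reservation serializes sync_obj access instead. */
        static int wait_idle_new(struct ttm_buffer_object *bo)
        {
                /* interruptible = true, no_wait = false, no ticket */
                int ret = ttm_bo_reserve(bo, true, false, false, NULL);

                if (ret)
                        return ret;
                /* lazy = false, interruptible = true, no_wait = false */
                ret = ttm_bo_wait(bo, false, true, false);
                ttm_bo_unreserve(bo);
                return ret;
        }

Trading one device-wide spinlock for per-object serialization is what later allows sync_obj to be replaced by reservation_object fences and, as the message notes, lockless readers via RCU.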
drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -1212,9 +1212,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
         }

         /* Fallback to software copy. */
-        spin_lock(&bo->bdev->fence_lock);
         ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
-        spin_unlock(&bo->bdev->fence_lock);
         if (ret == 0)
                 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
@@ -1457,26 +1455,19 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
         ttm_pool_unpopulate(ttm);
 }

+static void
+nouveau_bo_fence_unref(void **sync_obj)
+{
+        nouveau_fence_unref((struct nouveau_fence **)sync_obj);
+}
+
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 {
-        struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
-        struct nouveau_fence *old_fence = NULL;
-
         lockdep_assert_held(&nvbo->bo.resv->lock.base);

-        spin_lock(&nvbo->bo.bdev->fence_lock);
-        old_fence = nvbo->bo.sync_obj;
-        nvbo->bo.sync_obj = new_fence;
-        spin_unlock(&nvbo->bo.bdev->fence_lock);
-        nouveau_fence_unref(&old_fence);
-}
-
-static void
-nouveau_bo_fence_unref(void **sync_obj)
-{
-        nouveau_fence_unref((struct nouveau_fence **)sync_obj);
+        nouveau_bo_fence_unref(&nvbo->bo.sync_obj);
+        nvbo->bo.sync_obj = nouveau_fence_ref(fence);
 }

 static void *
drivers/gpu/drm/nouveau/nouveau_display.c

@@ -722,11 +722,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                 goto fail_unpin;

         /* synchronise rendering channel with the kernel's channel */
-        spin_lock(&new_bo->bo.bdev->fence_lock);
-        fence = nouveau_fence_ref(new_bo->bo.sync_obj);
-        spin_unlock(&new_bo->bo.bdev->fence_lock);
-        ret = nouveau_fence_sync(fence, chan);
-        nouveau_fence_unref(&fence);
+        ret = nouveau_fence_sync(new_bo->bo.sync_obj, chan);
         if (ret) {
                 ttm_bo_unreserve(&new_bo->bo);
                 goto fail_unpin;
drivers/gpu/drm/nouveau/nouveau_gem.c

@@ -103,9 +103,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
         list_del(&vma->head);

         if (mapped) {
-                spin_lock(&nvbo->bo.bdev->fence_lock);
                 fence = nouveau_fence_ref(nvbo->bo.sync_obj);
-                spin_unlock(&nvbo->bo.bdev->fence_lock);
         }

         if (fence) {
@@ -430,17 +428,11 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 static int
 validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
 {
-        struct nouveau_fence *fence = NULL;
+        struct nouveau_fence *fence = nvbo->bo.sync_obj;
         int ret = 0;

-        spin_lock(&nvbo->bo.bdev->fence_lock);
-        fence = nouveau_fence_ref(nvbo->bo.sync_obj);
-        spin_unlock(&nvbo->bo.bdev->fence_lock);
-
-        if (fence) {
+        if (fence)
                 ret = nouveau_fence_sync(fence, chan);
-                nouveau_fence_unref(&fence);
-        }

         return ret;
 }
@@ -659,9 +651,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                         data |= r->vor;
                 }

-                spin_lock(&nvbo->bo.bdev->fence_lock);
                 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-                spin_unlock(&nvbo->bo.bdev->fence_lock);
                 if (ret) {
                         NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
                         break;
@@ -894,11 +884,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,

         ret = ttm_bo_reserve(&nvbo->bo, true, false, false, NULL);
         if (!ret) {
-                spin_lock(&nvbo->bo.bdev->fence_lock);
                 ret = ttm_bo_wait(&nvbo->bo, true, true, true);
                 if (!no_wait && ret)
                         fence = nouveau_fence_ref(nvbo->bo.sync_obj);
-                spin_unlock(&nvbo->bo.bdev->fence_lock);
                 ttm_bo_unreserve(&nvbo->bo);
         }
drivers/gpu/drm/qxl/qxl_cmd.c

@@ -628,9 +628,7 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
         if (stall)
                 mutex_unlock(&qdev->surf_evict_mutex);

-        spin_lock(&surf->tbo.bdev->fence_lock);
         ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
-        spin_unlock(&surf->tbo.bdev->fence_lock);

         if (stall)
                 mutex_lock(&qdev->surf_evict_mutex);
drivers/gpu/drm/qxl/qxl_fence.c

@@ -60,9 +60,6 @@ int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
 {
         void *ret;
         int retval = 0;
-        struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
-
-        spin_lock(&bo->tbo.bdev->fence_lock);
         ret = radix_tree_delete(&qfence->tree, rel_id);
         if (ret == qfence)
@@ -71,7 +68,6 @@ int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
                 DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
                 retval = -ENOENT;
         }
-        spin_unlock(&bo->tbo.bdev->fence_lock);
         return retval;
 }
drivers/gpu/drm/qxl/qxl_object.h

@@ -76,12 +76,10 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
                 }
                 return r;
         }
-        spin_lock(&bo->tbo.bdev->fence_lock);
         if (mem_type)
                 *mem_type = bo->tbo.mem.mem_type;
         if (bo->tbo.sync_obj)
                 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
-        spin_unlock(&bo->tbo.bdev->fence_lock);
         ttm_bo_unreserve(&bo->tbo);
         return r;
 }
drivers/gpu/drm/qxl/qxl_release.c

@@ -337,7 +337,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
         glob = bo->glob;

         spin_lock(&glob->lru_lock);
-        spin_lock(&bdev->fence_lock);

         list_for_each_entry(entry, &release->bos, head) {
                 bo = entry->bo;
@@ -352,7 +351,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
                 __ttm_bo_unreserve(bo);
                 entry->reserved = false;
         }
-        spin_unlock(&bdev->fence_lock);
         spin_unlock(&glob->lru_lock);
         ww_acquire_fini(&release->ticket);
 }
drivers/gpu/drm/radeon/radeon_display.c

@@ -476,11 +476,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
         obj = new_radeon_fb->obj;
         new_rbo = gem_to_radeon_bo(obj);

-        spin_lock(&new_rbo->tbo.bdev->fence_lock);
-        if (new_rbo->tbo.sync_obj)
-                work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
-        spin_unlock(&new_rbo->tbo.bdev->fence_lock);
-
         /* pin the new buffer */
         DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
                          work->old_rbo, new_rbo);
@@ -499,6 +494,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
                 DRM_ERROR("failed to pin new rbo buffer before flip\n");
                 goto cleanup;
         }
+        work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
         radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
         radeon_bo_unreserve(new_rbo);
@@ -582,7 +578,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
         drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
         radeon_fence_unref(&work->fence);
         kfree(work);
-
         return r;
 }
drivers/gpu/drm/radeon/radeon_object.c

@@ -779,12 +779,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
         r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
         if (unlikely(r != 0))
                 return r;
-        spin_lock(&bo->tbo.bdev->fence_lock);
         if (mem_type)
                 *mem_type = bo->tbo.mem.mem_type;
         if (bo->tbo.sync_obj)
                 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
-        spin_unlock(&bo->tbo.bdev->fence_lock);
         ttm_bo_unreserve(&bo->tbo);
         return r;
 }
drivers/gpu/drm/ttm/ttm_bo.c

@@ -415,10 +415,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
         spin_lock(&glob->lru_lock);
         ret = __ttm_bo_reserve(bo, false, true, false, NULL);

-        spin_lock(&bdev->fence_lock);
-        (void) ttm_bo_wait(bo, false, false, true);
-        if (!ret && !bo->sync_obj) {
-                spin_unlock(&bdev->fence_lock);
-                put_count = ttm_bo_del_from_lru(bo);
+        if (!ret) {
+                (void) ttm_bo_wait(bo, false, false, true);

-                spin_unlock(&glob->lru_lock);
+                if (!bo->sync_obj) {
+                        put_count = ttm_bo_del_from_lru(bo);
+
+                        spin_unlock(&glob->lru_lock);
@@ -428,11 +428,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                         return;
                 }

-        if (bo->sync_obj)
                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
-        spin_unlock(&bdev->fence_lock);
-
-        if (!ret) {
                 /*
                  * Make NO_EVICT bos immediately available to
@@ -481,7 +477,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
         int put_count;
         int ret;

-        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, false, true);

         if (ret && !no_wait_gpu) {
@@ -493,7 +488,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                  * no new sync objects can be attached.
                  */
                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
-                spin_unlock(&bdev->fence_lock);

                 __ttm_bo_unreserve(bo);
                 spin_unlock(&glob->lru_lock);
@@ -523,11 +517,9 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                  * remove sync_obj with ttm_bo_wait, the wait should be
                  * finished, and no new wait object should have been added.
                  */
-                spin_lock(&bdev->fence_lock);
                 ret = ttm_bo_wait(bo, false, false, true);
                 WARN_ON(ret);
         }
-        spin_unlock(&bdev->fence_lock);

         if (ret || unlikely(list_empty(&bo->ddestroy))) {
                 __ttm_bo_unreserve(bo);
@@ -665,9 +657,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
         struct ttm_placement placement;
         int ret = 0;

-        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-        spin_unlock(&bdev->fence_lock);

         if (unlikely(ret != 0)) {
                 if (ret != -ERESTARTSYS) {
@@ -958,7 +948,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 {
         int ret = 0;
         struct ttm_mem_reg mem;
-        struct ttm_bo_device *bdev = bo->bdev;

         lockdep_assert_held(&bo->resv->lock.base);
@@ -967,9 +956,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
          * Have the driver move function wait for idle when necessary,
          * instead of doing it here.
          */
-        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-        spin_unlock(&bdev->fence_lock);
         if (ret)
                 return ret;
         mem.num_pages = bo->num_pages;
@@ -1459,7 +1446,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
         bdev->glob = glob;
         bdev->need_dma32 = need_dma32;
         bdev->val_seq = 0;
-        spin_lock_init(&bdev->fence_lock);
         mutex_lock(&glob->device_list_mutex);
         list_add_tail(&bdev->device_list, &glob->device_list);
         mutex_unlock(&glob->device_list_mutex);
@@ -1517,7 +1503,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
                 bool lazy, bool interruptible, bool no_wait)
 {
         struct ttm_bo_driver *driver = bo->bdev->driver;
-        struct ttm_bo_device *bdev = bo->bdev;
         void *sync_obj;
         int ret = 0;
@@ -1526,53 +1511,33 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
         if (likely(bo->sync_obj == NULL))
                 return 0;

-        while (bo->sync_obj) {
+        if (bo->sync_obj) {
                 if (driver->sync_obj_signaled(bo->sync_obj)) {
-                        void *tmp_obj = bo->sync_obj;
-                        bo->sync_obj = NULL;
+                        driver->sync_obj_unref(&bo->sync_obj);
                         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-                        spin_unlock(&bdev->fence_lock);
-                        driver->sync_obj_unref(&tmp_obj);
-                        spin_lock(&bdev->fence_lock);
-                        continue;
+                        return 0;
                 }

                 if (no_wait)
                         return -EBUSY;

                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
-                spin_unlock(&bdev->fence_lock);
                 ret = driver->sync_obj_wait(sync_obj,
                                             lazy, interruptible);
-                if (unlikely(ret != 0)) {
-                        driver->sync_obj_unref(&sync_obj);
-                        spin_lock(&bdev->fence_lock);
-                        return ret;
-                }
-                spin_lock(&bdev->fence_lock);
-                if (likely(bo->sync_obj == sync_obj)) {
-                        void *tmp_obj = bo->sync_obj;
-                        bo->sync_obj = NULL;
+
+                if (likely(ret == 0)) {
                         clear_bit(TTM_BO_PRIV_FLAG_MOVING,
                                   &bo->priv_flags);
-                        spin_unlock(&bdev->fence_lock);
-                        driver->sync_obj_unref(&sync_obj);
-                        driver->sync_obj_unref(&tmp_obj);
-                        spin_lock(&bdev->fence_lock);
-                } else {
-                        spin_unlock(&bdev->fence_lock);
-                        driver->sync_obj_unref(&sync_obj);
-                        spin_lock(&bdev->fence_lock);
+                        driver->sync_obj_unref(&bo->sync_obj);
                 }
+                driver->sync_obj_unref(&sync_obj);
         }
-        return 0;
+        return ret;
 }
 EXPORT_SYMBOL(ttm_bo_wait);

 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
         int ret = 0;

         /*
@@ -1582,9 +1547,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
         ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
         if (unlikely(ret != 0))
                 return ret;
-        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, true, no_wait);
-        spin_unlock(&bdev->fence_lock);
         if (likely(ret == 0))
                 atomic_inc(&bo->cpu_writers);
         ttm_bo_unreserve(bo);
@@ -1641,9 +1604,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
          * Wait for GPU, then move to system cached.
          */

-        spin_lock(&bo->bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, false, false);
-        spin_unlock(&bo->bdev->fence_lock);

         if (unlikely(ret != 0))
                 goto out;
drivers/gpu/drm/ttm/ttm_bo_util.c

@@ -466,12 +466,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
         drm_vma_node_reset(&fbo->vma_node);
         atomic_set(&fbo->cpu_writers, 0);

-        spin_lock(&bdev->fence_lock);
         if (bo->sync_obj)
                 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
         else
                 fbo->sync_obj = NULL;
-        spin_unlock(&bdev->fence_lock);
         kref_init(&fbo->list_kref);
         kref_init(&fbo->kref);
         fbo->destroy = &ttm_transfered_destroy;
@@ -657,7 +655,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
         struct ttm_buffer_object *ghost_obj;
         void *tmp_obj = NULL;

-        spin_lock(&bdev->fence_lock);
         if (bo->sync_obj) {
                 tmp_obj = bo->sync_obj;
                 bo->sync_obj = NULL;
@@ -665,7 +662,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
         bo->sync_obj = driver->sync_obj_ref(sync_obj);
         if (evict) {
                 ret = ttm_bo_wait(bo, false, false, false);
-                spin_unlock(&bdev->fence_lock);
                 if (tmp_obj)
                         driver->sync_obj_unref(&tmp_obj);
                 if (ret)
@@ -688,7 +684,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                  */
                 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-                spin_unlock(&bdev->fence_lock);

                 if (tmp_obj)
                         driver->sync_obj_unref(&tmp_obj);
drivers/gpu/drm/ttm/ttm_bo_vm.c

@@ -45,10 +45,8 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                 struct vm_area_struct *vma,
                                 struct vm_fault *vmf)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
         int ret = 0;

-        spin_lock(&bdev->fence_lock);
         if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
                 goto out_unlock;
@@ -82,7 +80,6 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                 VM_FAULT_NOPAGE;

 out_unlock:
-        spin_unlock(&bdev->fence_lock);
         return ret;
 }
drivers/gpu/drm/ttm/ttm_execbuf_util.c

@@ -217,7 +217,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
         glob = bo->glob;

         spin_lock(&glob->lru_lock);
-        spin_lock(&bdev->fence_lock);

         list_for_each_entry(entry, list, head) {
                 bo = entry->bo;
@@ -227,7 +226,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                 __ttm_bo_unreserve(bo);
                 entry->reserved = false;
         }
-        spin_unlock(&bdev->fence_lock);
         spin_unlock(&glob->lru_lock);

         if (ticket)
                 ww_acquire_fini(ticket);
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c

@@ -863,11 +863,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
  */
 static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
-
-        spin_lock(&bdev->fence_lock);
         ttm_bo_wait(bo, false, false, false);
-        spin_unlock(&bdev->fence_lock);
 }
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c

@@ -567,12 +567,13 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
         int ret;

         if (flags & drm_vmw_synccpu_allow_cs) {
-                struct ttm_bo_device *bdev = bo->bdev;
+                bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);

-                spin_lock(&bdev->fence_lock);
-                ret = ttm_bo_wait(bo, false, true,
-                                  !!(flags & drm_vmw_synccpu_dontblock));
-                spin_unlock(&bdev->fence_lock);
+                ret = ttm_bo_reserve(bo, true, nonblock, false, NULL);
+                if (!ret) {
+                        ret = ttm_bo_wait(bo, false, true, nonblock);
+                        ttm_bo_unreserve(bo);
+                }
                 return ret;
         }
@@ -1429,12 +1430,10 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
         else
                 driver->sync_obj_ref(fence);

-        spin_lock(&bdev->fence_lock);
-
         old_fence_obj = bo->sync_obj;
         bo->sync_obj = fence;
-        spin_unlock(&bdev->fence_lock);

         if (old_fence_obj)
                 vmw_fence_obj_unreference(&old_fence_obj);
@@ -1475,7 +1474,6 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
         if (mem->mem_type != VMW_PL_MOB) {
                 struct vmw_resource *res, *n;
-                struct ttm_bo_device *bdev = bo->bdev;
                 struct ttm_validate_buffer val_buf;

                 val_buf.bo = bo;
@@ -1491,9 +1489,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                         list_del_init(&res->mob_head);
                 }

-                spin_lock(&bdev->fence_lock);
                 (void) ttm_bo_wait(bo, false, false, false);
-                spin_unlock(&bdev->fence_lock);
         }
 }
include/drm/ttm/ttm_bo_api.h

@@ -237,10 +237,7 @@ struct ttm_buffer_object {
         struct list_head io_reserve_lru;

         /**
-         * Members protected by struct buffer_object_device::fence_lock
-         * In addition, setting sync_obj to anything else
-         * than NULL requires bo::reserved to be held. This allows for
-         * checking NULL while reserved but not holding the mentioned lock.
+         * Members protected by a bo reservation.
          */

         void *sync_obj;
include/drm/ttm/ttm_bo_driver.h

@@ -521,8 +521,6 @@ struct ttm_bo_global {
  *
  * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
  * @man: An array of mem_type_managers.
- * @fence_lock: Protects the synchronizing members on *all* bos belonging
- * to this device.
  * @vma_manager: Address space manager
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
@@ -542,7 +540,6 @@ struct ttm_bo_device {
         struct ttm_bo_global *glob;
         struct ttm_bo_driver *driver;
         struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
-        spinlock_t fence_lock;

         /*
          * Protected by internal locks.