Commit dfd5e50e authored by Christian König, committed by Alex Deucher

drm/ttm: remove use_ticket parameter from ttm_bo_reserve

Not used any more.
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 5ee7b41a
...@@ -71,7 +71,7 @@ static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr) ...@@ -71,7 +71,7 @@ static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{ {
int r; int r;
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0); r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) { if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) if (r != -ERESTARTSYS)
dev_err(bo->adev->dev, "%p reserve failed\n", bo); dev_err(bo->adev->dev, "%p reserve failed\n", bo);
......
...@@ -367,7 +367,7 @@ static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait) ...@@ -367,7 +367,7 @@ static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
{ {
int ret; int ret;
ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
if (ret) { if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY) if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo); DRM_ERROR("reserve failed %p\n", bo);
......
...@@ -82,7 +82,7 @@ static int bochsfb_create(struct drm_fb_helper *helper, ...@@ -82,7 +82,7 @@ static int bochsfb_create(struct drm_fb_helper *helper,
bo = gem_to_bochs_bo(gobj); bo = gem_to_bochs_bo(gobj);
ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret) if (ret)
return ret; return ret;
......
...@@ -43,7 +43,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, ...@@ -43,7 +43,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) { if (old_fb) {
bochs_fb = to_bochs_framebuffer(old_fb); bochs_fb = to_bochs_framebuffer(old_fb);
bo = gem_to_bochs_bo(bochs_fb->obj); bo = gem_to_bochs_bo(bochs_fb->obj);
ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret) { if (ret) {
DRM_ERROR("failed to reserve old_fb bo\n"); DRM_ERROR("failed to reserve old_fb bo\n");
} else { } else {
...@@ -57,7 +57,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, ...@@ -57,7 +57,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
bochs_fb = to_bochs_framebuffer(crtc->primary->fb); bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
bo = gem_to_bochs_bo(bochs_fb->obj); bo = gem_to_bochs_bo(bochs_fb->obj);
ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret) if (ret)
return ret; return ret;
......
...@@ -245,7 +245,7 @@ static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait) ...@@ -245,7 +245,7 @@ static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
{ {
int ret; int ret;
ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
if (ret) { if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY) if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo); DRM_ERROR("reserve failed %p\n", bo);
......
...@@ -281,7 +281,7 @@ static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait) ...@@ -281,7 +281,7 @@ static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
{ {
int ret; int ret;
ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
if (ret) { if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY) if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo); DRM_ERROR("reserve failed %p\n", bo);
......
...@@ -312,7 +312,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) ...@@ -312,7 +312,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
bool force = false, evict = false; bool force = false, evict = false;
int ret; int ret;
ret = ttm_bo_reserve(bo, false, false, false, NULL); ret = ttm_bo_reserve(bo, false, false, NULL);
if (ret) if (ret)
return ret; return ret;
...@@ -385,7 +385,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) ...@@ -385,7 +385,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
struct ttm_buffer_object *bo = &nvbo->bo; struct ttm_buffer_object *bo = &nvbo->bo;
int ret, ref; int ret, ref;
ret = ttm_bo_reserve(bo, false, false, false, NULL); ret = ttm_bo_reserve(bo, false, false, NULL);
if (ret) if (ret)
return ret; return ret;
...@@ -420,7 +420,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo) ...@@ -420,7 +420,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
{ {
int ret; int ret;
ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret) if (ret)
return ret; return ret;
......
...@@ -739,7 +739,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, ...@@ -739,7 +739,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
} }
mutex_lock(&cli->mutex); mutex_lock(&cli->mutex);
ret = ttm_bo_reserve(&new_bo->bo, true, false, false, NULL); ret = ttm_bo_reserve(&new_bo->bo, true, false, NULL);
if (ret) if (ret)
goto fail_unpin; goto fail_unpin;
...@@ -753,7 +753,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, ...@@ -753,7 +753,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (new_bo != old_bo) { if (new_bo != old_bo) {
ttm_bo_unreserve(&new_bo->bo); ttm_bo_unreserve(&new_bo->bo);
ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL); ret = ttm_bo_reserve(&old_bo->bo, true, false, NULL);
if (ret) if (ret)
goto fail_unpin; goto fail_unpin;
} }
......
...@@ -71,7 +71,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) ...@@ -71,7 +71,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
if (!cli->vm) if (!cli->vm)
return 0; return 0;
ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret) if (ret)
return ret; return ret;
...@@ -156,7 +156,7 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) ...@@ -156,7 +156,7 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
if (!cli->vm) if (!cli->vm)
return; return;
ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret) if (ret)
return; return;
...@@ -409,7 +409,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv, ...@@ -409,7 +409,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
break; break;
} }
ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket); ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
if (ret) { if (ret) {
list_splice_tail_init(&vram_list, &op->list); list_splice_tail_init(&vram_list, &op->list);
list_splice_tail_init(&gart_list, &op->list); list_splice_tail_init(&gart_list, &op->list);
......
...@@ -31,7 +31,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait) ...@@ -31,7 +31,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
{ {
int r; int r;
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) { if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) { if (r != -ERESTARTSYS) {
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
...@@ -67,7 +67,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, ...@@ -67,7 +67,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
{ {
int r; int r;
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) { if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) { if (r != -ERESTARTSYS) {
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
......
...@@ -832,7 +832,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) ...@@ -832,7 +832,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{ {
int r; int r;
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) if (unlikely(r != 0))
return r; return r;
if (mem_type) if (mem_type)
......
...@@ -65,7 +65,7 @@ static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr) ...@@ -65,7 +65,7 @@ static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{ {
int r; int r;
r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, NULL); r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) { if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) if (r != -ERESTARTSYS)
dev_err(bo->rdev->dev, "%p reserve failed\n", bo); dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
......
...@@ -452,7 +452,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) ...@@ -452,7 +452,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int ret; int ret;
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
ret = __ttm_bo_reserve(bo, false, true, false, NULL); ret = __ttm_bo_reserve(bo, false, true, NULL);
if (!ret) { if (!ret) {
if (!ttm_bo_wait(bo, false, false, true)) { if (!ttm_bo_wait(bo, false, false, true)) {
...@@ -526,7 +526,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, ...@@ -526,7 +526,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return -EBUSY; return -EBUSY;
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
ret = __ttm_bo_reserve(bo, false, true, false, NULL); ret = __ttm_bo_reserve(bo, false, true, NULL);
/* /*
* We raced, and lost, someone else holds the reservation now, * We raced, and lost, someone else holds the reservation now,
...@@ -595,11 +595,10 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) ...@@ -595,11 +595,10 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref); kref_get(&nentry->list_kref);
} }
ret = __ttm_bo_reserve(entry, false, true, false, NULL); ret = __ttm_bo_reserve(entry, false, true, NULL);
if (remove_all && ret) { if (remove_all && ret) {
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
ret = __ttm_bo_reserve(entry, false, false, ret = __ttm_bo_reserve(entry, false, false, NULL);
false, NULL);
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
} }
...@@ -741,7 +740,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, ...@@ -741,7 +740,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) { list_for_each_entry(bo, &man->lru, lru) {
ret = __ttm_bo_reserve(bo, false, true, false, NULL); ret = __ttm_bo_reserve(bo, false, true, NULL);
if (!ret) { if (!ret) {
if (place && (place->fpfn || place->lpfn)) { if (place && (place->fpfn || place->lpfn)) {
/* Don't evict this BO if it's outside of the /* Don't evict this BO if it's outside of the
...@@ -1623,7 +1622,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) ...@@ -1623,7 +1622,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
* Using ttm_bo_reserve makes sure the lru lists are updated. * Using ttm_bo_reserve makes sure the lru lists are updated.
*/ */
ret = ttm_bo_reserve(bo, true, no_wait, false, NULL); ret = ttm_bo_reserve(bo, true, no_wait, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
ret = ttm_bo_wait(bo, false, true, no_wait); ret = ttm_bo_wait(bo, false, true, no_wait);
...@@ -1656,7 +1655,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) ...@@ -1656,7 +1655,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &glob->swap_lru, swap) { list_for_each_entry(bo, &glob->swap_lru, swap) {
ret = __ttm_bo_reserve(bo, false, true, false, NULL); ret = __ttm_bo_reserve(bo, false, true, NULL);
if (!ret) if (!ret)
break; break;
} }
...@@ -1755,7 +1754,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo) ...@@ -1755,7 +1754,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
return -ERESTARTSYS; return -ERESTARTSYS;
if (!ww_mutex_is_locked(&bo->resv->lock)) if (!ww_mutex_is_locked(&bo->resv->lock))
goto out_unlock; goto out_unlock;
ret = __ttm_bo_reserve(bo, true, false, false, NULL); ret = __ttm_bo_reserve(bo, true, false, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_unlock; goto out_unlock;
__ttm_bo_unreserve(bo); __ttm_bo_unreserve(bo);
......
...@@ -108,7 +108,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -108,7 +108,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* for reserve, and if it fails, retry the fault after waiting * for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved. * for the buffer to become unreserved.
*/ */
ret = ttm_bo_reserve(bo, true, true, false, NULL); ret = ttm_bo_reserve(bo, true, true, NULL);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
if (ret != -EBUSY) if (ret != -EBUSY)
return VM_FAULT_NOPAGE; return VM_FAULT_NOPAGE;
......
...@@ -112,8 +112,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, ...@@ -112,8 +112,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) { list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo; struct ttm_buffer_object *bo = entry->bo;
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true, ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
ticket);
if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
__ttm_bo_unreserve(bo); __ttm_bo_unreserve(bo);
......
...@@ -400,7 +400,7 @@ static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo, ...@@ -400,7 +400,7 @@ static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
{ {
int r; int r;
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) { if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) { if (r != -ERESTARTSYS) {
struct virtio_gpu_device *qdev = struct virtio_gpu_device *qdev =
......
...@@ -155,7 +155,7 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait) ...@@ -155,7 +155,7 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
{ {
int r; int r;
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) if (unlikely(r != 0))
return r; return r;
r = ttm_bo_wait(&bo->tbo, true, true, no_wait); r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
......
...@@ -421,7 +421,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) ...@@ -421,7 +421,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
} }
bo = &buf->base; bo = &buf->base;
WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL)); WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));
ret = ttm_bo_wait(old_bo, false, false, false); ret = ttm_bo_wait(old_bo, false, false, false);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
......
...@@ -56,7 +56,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv, ...@@ -56,7 +56,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
vmw_execbuf_release_pinned_bo(dev_priv); vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto err; goto err;
...@@ -98,7 +98,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv, ...@@ -98,7 +98,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
vmw_execbuf_release_pinned_bo(dev_priv); vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto err; goto err;
...@@ -174,7 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv, ...@@ -174,7 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
return ret; return ret;
vmw_execbuf_release_pinned_bo(dev_priv); vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto err_unlock; goto err_unlock;
...@@ -225,7 +225,7 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv, ...@@ -225,7 +225,7 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto err; goto err;
......
...@@ -326,7 +326,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) ...@@ -326,7 +326,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL); ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
BUG_ON(ret != 0); BUG_ON(ret != 0);
vmw_bo_pin_reserved(vbo, true); vmw_bo_pin_reserved(vbo, true);
......
...@@ -98,7 +98,7 @@ int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv, ...@@ -98,7 +98,7 @@ int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
kmap_offset = 0; kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT; kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL); ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n"); DRM_ERROR("reserve failed\n");
return -EINVAL; return -EINVAL;
...@@ -318,7 +318,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, ...@@ -318,7 +318,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
kmap_num = (64*64*4) >> PAGE_SHIFT; kmap_num = (64*64*4) >> PAGE_SHIFT;
ret = ttm_bo_reserve(bo, true, false, false, NULL); ret = ttm_bo_reserve(bo, true, false, NULL);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n"); DRM_ERROR("reserve failed\n");
return; return;
...@@ -1859,7 +1859,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, ...@@ -1859,7 +1859,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo = &buf->base; struct ttm_buffer_object *bo = &buf->base;
int ret; int ret;
ttm_bo_reserve(bo, false, false, interruptible, NULL); ttm_bo_reserve(bo, false, false, NULL);
ret = vmw_validate_single_buffer(dev_priv, bo, interruptible, ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
validate_as_mob); validate_as_mob);
if (ret) if (ret)
......
...@@ -222,7 +222,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, ...@@ -222,7 +222,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
if (bo) { if (bo) {
int ret; int ret;
ret = ttm_bo_reserve(bo, false, true, false, NULL); ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0); BUG_ON(ret != 0);
vmw_fence_single_bo(bo, NULL); vmw_fence_single_bo(bo, NULL);
...@@ -262,7 +262,7 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv, ...@@ -262,7 +262,7 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_bo; goto out_no_bo;
ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL); ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
BUG_ON(ret != 0); BUG_ON(ret != 0);
ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm); ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
...@@ -357,7 +357,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, ...@@ -357,7 +357,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
vmw_takedown_otable_base(dev_priv, i, vmw_takedown_otable_base(dev_priv, i,
&batch->otables[i]); &batch->otables[i]);
ret = ttm_bo_reserve(bo, false, true, false, NULL); ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0); BUG_ON(ret != 0);
vmw_fence_single_bo(bo, NULL); vmw_fence_single_bo(bo, NULL);
...@@ -440,7 +440,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv, ...@@ -440,7 +440,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL); ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
BUG_ON(ret != 0); BUG_ON(ret != 0);
ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm); ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
...@@ -545,7 +545,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, ...@@ -545,7 +545,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
const struct vmw_sg_table *vsgt; const struct vmw_sg_table *vsgt;
int ret; int ret;
ret = ttm_bo_reserve(bo, false, true, false, NULL); ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0); BUG_ON(ret != 0);
vsgt = vmw_bo_sg_table(bo); vsgt = vmw_bo_sg_table(bo);
...@@ -595,7 +595,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv, ...@@ -595,7 +595,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo = mob->pt_bo; struct ttm_buffer_object *bo = mob->pt_bo;
if (bo) { if (bo) {
ret = ttm_bo_reserve(bo, false, true, false, NULL); ret = ttm_bo_reserve(bo, false, true, NULL);
/* /*
* Noone else should be using this buffer. * Noone else should be using this buffer.
*/ */
......
...@@ -129,7 +129,7 @@ static void vmw_resource_release(struct kref *kref) ...@@ -129,7 +129,7 @@ static void vmw_resource_release(struct kref *kref)
if (res->backup) { if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base; struct ttm_buffer_object *bo = &res->backup->base;
ttm_bo_reserve(bo, false, false, false, NULL); ttm_bo_reserve(bo, false, false, NULL);
if (!list_empty(&res->mob_head) && if (!list_empty(&res->mob_head) &&
res->func->unbind != NULL) { res->func->unbind != NULL) {
struct ttm_validate_buffer val_buf; struct ttm_validate_buffer val_buf;
...@@ -1717,8 +1717,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible) ...@@ -1717,8 +1717,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
if (res->backup) { if (res->backup) {
vbo = res->backup; vbo = res->backup;
ttm_bo_reserve(&vbo->base, interruptible, false, false, ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
NULL);
if (!vbo->pin_count) { if (!vbo->pin_count) {
ret = ttm_bo_validate ret = ttm_bo_validate
(&vbo->base, (&vbo->base,
...@@ -1773,7 +1772,7 @@ void vmw_resource_unpin(struct vmw_resource *res) ...@@ -1773,7 +1772,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
if (--res->pin_count == 0 && res->backup) { if (--res->pin_count == 0 && res->backup) {
struct vmw_dma_buffer *vbo = res->backup; struct vmw_dma_buffer *vbo = res->backup;
ttm_bo_reserve(&vbo->base, false, false, false, NULL); ttm_bo_reserve(&vbo->base, false, false, NULL);
vmw_bo_pin_reserved(vbo, false); vmw_bo_pin_reserved(vbo, false);
ttm_bo_unreserve(&vbo->base); ttm_bo_unreserve(&vbo->base);
} }
......
...@@ -988,7 +988,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, ...@@ -988,7 +988,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out; goto out;
ret = ttm_bo_reserve(&buf->base, false, true, false, NULL); ret = ttm_bo_reserve(&buf->base, false, true, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto no_reserve; goto no_reserve;
......
...@@ -759,8 +759,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); ...@@ -759,8 +759,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
* @bo: A pointer to a struct ttm_buffer_object. * @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting. * @interruptible: Sleep interruptible if waiting.
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
* @use_ticket: If @bo is already reserved, Only sleep waiting for * @ticket: ticket used to acquire the ww_mutex.
* it to become unreserved if @ticket->stamp is older.
* *
* Will not remove reserved buffers from the lru lists. * Will not remove reserved buffers from the lru lists.
* Otherwise identical to ttm_bo_reserve. * Otherwise identical to ttm_bo_reserve.
...@@ -776,8 +775,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); ...@@ -776,8 +775,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
* be returned if @use_ticket is set to true. * be returned if @use_ticket is set to true.
*/ */
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible, bool interruptible, bool no_wait,
bool no_wait, bool use_ticket,
struct ww_acquire_ctx *ticket) struct ww_acquire_ctx *ticket)
{ {
int ret = 0; int ret = 0;
...@@ -806,8 +804,7 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, ...@@ -806,8 +804,7 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
* @bo: A pointer to a struct ttm_buffer_object. * @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting. * @interruptible: Sleep interruptible if waiting.
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
* @use_ticket: If @bo is already reserved, Only sleep waiting for * @ticket: ticket used to acquire the ww_mutex.
* it to become unreserved if @ticket->stamp is older.
* *
* Locks a buffer object for validation. (Or prevents other processes from * Locks a buffer object for validation. (Or prevents other processes from
* locking it for validation) and removes it from lru lists, while taking * locking it for validation) and removes it from lru lists, while taking
...@@ -846,15 +843,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, ...@@ -846,15 +843,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
* be returned if @use_ticket is set to true. * be returned if @use_ticket is set to true.
*/ */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible, bool interruptible, bool no_wait,
bool no_wait, bool use_ticket,
struct ww_acquire_ctx *ticket) struct ww_acquire_ctx *ticket)
{ {
int ret; int ret;
WARN_ON(!atomic_read(&bo->kref.refcount)); WARN_ON(!atomic_read(&bo->kref.refcount));
ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket); ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
if (likely(ret == 0)) if (likely(ret == 0))
ttm_bo_del_sub_from_lru(bo); ttm_bo_del_sub_from_lru(bo);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment