Commit 1d7f5e6c authored by Christian König's avatar Christian König

drm/i915: drop bo->moving dependency

That should now be handled by the common dma_resv framework.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: intel-gfx@lists.freedesktop.org
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-13-christian.koenig@amd.com
parent 46b35b33
...@@ -741,30 +741,19 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = { ...@@ -741,30 +741,19 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
/** /**
* i915_gem_object_get_moving_fence - Get the object's moving fence if any * i915_gem_object_get_moving_fence - Get the object's moving fence if any
* @obj: The object whose moving fence to get. * @obj: The object whose moving fence to get.
* @fence: The resulting fence
* *
* A non-signaled moving fence means that there is an async operation * A non-signaled moving fence means that there is an async operation
* pending on the object that needs to be waited on before setting up * pending on the object that needs to be waited on before setting up
* any GPU- or CPU PTEs to the object's pages. * any GPU- or CPU PTEs to the object's pages.
* *
* Return: A refcounted pointer to the object's moving fence if any, * Return: Negative error code or 0 for success.
* NULL otherwise.
*/ */
struct dma_fence * int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj) struct dma_fence **fence)
{ {
return dma_fence_get(i915_gem_to_ttm(obj)->moving); return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
} fence);
void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
struct dma_fence *fence)
{
struct dma_fence **moving = &i915_gem_to_ttm(obj)->moving;
if (*moving == fence)
return;
dma_fence_put(*moving);
*moving = dma_fence_get(fence);
} }
/** /**
...@@ -782,23 +771,9 @@ void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj, ...@@ -782,23 +771,9 @@ void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
bool intr) bool intr)
{ {
struct dma_fence *fence = i915_gem_to_ttm(obj)->moving;
int ret;
assert_object_held(obj); assert_object_held(obj);
if (!fence) return dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
return 0; intr, MAX_SCHEDULE_TIMEOUT);
ret = dma_fence_wait(fence, intr);
if (ret)
return ret;
if (fence->error)
return fence->error;
i915_gem_to_ttm(obj)->moving = NULL;
dma_fence_put(fence);
return 0;
} }
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
......
...@@ -520,12 +520,8 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj) ...@@ -520,12 +520,8 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
} }
struct dma_fence * int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj); struct dma_fence **fence);
void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
struct dma_fence *fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
bool intr); bool intr);
......
...@@ -467,19 +467,6 @@ __i915_ttm_move(struct ttm_buffer_object *bo, ...@@ -467,19 +467,6 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
return fence; return fence;
} }
static int
prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
struct i915_deps *deps)
{
int ret;
ret = i915_deps_add_dependency(deps, bo->moving, ctx);
if (!ret)
ret = i915_deps_add_resv(deps, bo->base.resv, ctx);
return ret;
}
/** /**
* i915_ttm_move - The TTM move callback used by i915. * i915_ttm_move - The TTM move callback used by i915.
* @bo: The buffer object. * @bo: The buffer object.
...@@ -534,7 +521,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, ...@@ -534,7 +521,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
struct i915_deps deps; struct i915_deps deps;
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
ret = prev_deps(bo, ctx, &deps); ret = i915_deps_add_resv(&deps, bo->base.resv, ctx);
if (ret) { if (ret) {
i915_refct_sgt_put(dst_rsgt); i915_refct_sgt_put(dst_rsgt);
return ret; return ret;
......
...@@ -219,8 +219,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt, ...@@ -219,8 +219,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
err = dma_resv_reserve_fences(obj->base.resv, 1); err = dma_resv_reserve_fences(obj->base.resv, 1);
if (!err) if (!err)
dma_resv_add_fence(obj->base.resv, &rq->fence, dma_resv_add_fence(obj->base.resv, &rq->fence,
DMA_RESV_USAGE_WRITE); DMA_RESV_USAGE_KERNEL);
i915_gem_object_set_moving_fence(obj, &rq->fence);
i915_request_put(rq); i915_request_put(rq);
} }
if (err) if (err)
......
...@@ -1221,8 +1221,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements, ...@@ -1221,8 +1221,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
if (rq) { if (rq) {
dma_resv_add_fence(obj->base.resv, &rq->fence, dma_resv_add_fence(obj->base.resv, &rq->fence,
DMA_RESV_USAGE_WRITE); DMA_RESV_USAGE_KERNEL);
i915_gem_object_set_moving_fence(obj, &rq->fence);
i915_request_put(rq); i915_request_put(rq);
} }
i915_gem_object_unlock(obj); i915_gem_object_unlock(obj);
......
...@@ -1357,10 +1357,17 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, ...@@ -1357,10 +1357,17 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err) if (err)
return err; return err;
if (vma->obj) {
err = i915_gem_object_get_moving_fence(vma->obj, &moving);
if (err)
return err;
} else {
moving = NULL;
}
if (flags & PIN_GLOBAL) if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
if (flags & vma->vm->bind_async_flags || moving) { if (flags & vma->vm->bind_async_flags || moving) {
/* lock VM */ /* lock VM */
err = i915_vm_lock_objects(vma->vm, ww); err = i915_vm_lock_objects(vma->vm, ww);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment