Commit b2dbf8d9 authored by Chris Wilson

drm/i915/blt: Remove recursive vma->lock

As we have already plugged the w->dma into the reservation_object, and
have set ourselves up to automatically signal the request and w->dma on
completion, we do not need to export the rq->fence directly; we can just
use the w->dma fence instead.

This avoids having to take the reservation lock inside the worker, which
cross-release lockdep would complain about. :)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190621215733.12070-1-chris@chris-wilson.co.uk
parent fb993aa7
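
For context, a simplified before/after sketch of the worker change (identifiers taken from the diff below; error handling and the rest of clear_pages_worker() are omitted, and the comments are my reading of the commit message rather than the driver's own annotations):

	/*
	 * Before: publishing rq->fence into the object's reservation_object
	 * required taking the vma's reservation lock from inside the worker.
	 * Since w->dma already lives in that same reservation_object, this is
	 * the recursive acquisition that cross-release lockdep objects to.
	 */
	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	/*
	 * After: w->dma was exported via (vma|obj)->resv when the work was
	 * queued, so the worker only needs to record the GPU activity on the
	 * vma; the request's completion then propagates the signal to w->dma
	 * without touching the reservation lock.
	 */
	err = i915_active_ref(&vma->active, rq->fence.context, rq);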
...
@@ -72,7 +72,6 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
 	vma->ops = &proxy_vma_ops;
 
 	sleeve->vma = vma;
-	sleeve->obj = i915_gem_object_get(obj);
 	sleeve->pages = pages;
 	sleeve->page_sizes = *page_sizes;
 
@@ -85,7 +84,6 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
 static void destroy_sleeve(struct i915_sleeve *sleeve)
 {
-	i915_gem_object_put(sleeve->obj);
 	kfree(sleeve);
 }
 
@@ -155,7 +153,7 @@ static void clear_pages_worker(struct work_struct *work)
 {
 	struct clear_pages_work *w = container_of(work, typeof(*w), work);
 	struct drm_i915_private *i915 = w->ce->gem_context->i915;
-	struct drm_i915_gem_object *obj = w->sleeve->obj;
+	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
 	struct i915_vma *vma = w->sleeve->vma;
 	struct i915_request *rq;
 	int err = w->dma.error;
 
@@ -193,10 +191,12 @@ static void clear_pages_worker(struct work_struct *work)
 		goto out_request;
 	}
 
-	/* XXX: more feverish nightmares await */
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	/*
+	 * w->dma is already exported via (vma|obj)->resv we need only
+	 * keep track of the GPU activity within this vma/request, and
+	 * propagate the signal from the request to w->dma.
+	 */
+	err = i915_active_ref(&vma->active, rq->fence.context, rq);
 	if (err)
 		goto out_request;
...
@@ -63,17 +63,6 @@ static int igt_client_fill(void *arg)
 		if (err)
 			goto err_unpin;
 
-		/*
-		 * XXX: For now do the wait without the object resv lock to
-		 * ensure we don't deadlock.
-		 */
-		err = i915_gem_object_wait(obj,
-					   I915_WAIT_INTERRUPTIBLE |
-					   I915_WAIT_ALL,
-					   MAX_SCHEDULE_TIMEOUT);
-		if (err)
-			goto err_unpin;
-
 		i915_gem_object_lock(obj);
 		err = i915_gem_object_set_to_cpu_domain(obj, false);
 		i915_gem_object_unlock(obj);
...