Commit da99fe5f authored by Chris Wilson

drm/i915: Refactor export_fence() after i915_vma_move_to_active()

Currently all callers are responsible for adding the vma to the active
timeline and then exporting its fence. Combine the two operations into
i915_vma_move_to_active() to move all the extra handling from the
callers to the single site.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180706103947.15919-1-chris@chris-wilson.co.uk
parent 8fdbfd86
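
For orientation, here is a minimal caller-side sketch of what this refactor changes (illustration only, not part of the patch; identifiers are taken from the hunks below, error handling and surrounding code omitted):

	/* Before: every caller open-coded the fence export after moving the
	 * vma onto the active timeline.
	 */
	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	reservation_object_lock(vma->resv, NULL);
	reservation_object_add_excl_fence(vma->resv, &rq->fence);
	reservation_object_unlock(vma->resv);

	/* After: i915_vma_move_to_active() calls export_fence() internally,
	 * so a single call is enough.
	 */
	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);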
@@ -1166,15 +1166,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
 	i915_vma_move_to_active(batch, rq, 0);
-	reservation_object_lock(batch->resv, NULL);
-	reservation_object_add_excl_fence(batch->resv, &rq->fence);
-	reservation_object_unlock(batch->resv);
 	i915_vma_unpin(batch);

 	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	reservation_object_lock(vma->resv, NULL);
-	reservation_object_add_excl_fence(vma->resv, &rq->fence);
-	reservation_object_unlock(vma->resv);

 	rq->batch = batch;
@@ -1771,25 +1765,6 @@ static int eb_relocate(struct i915_execbuffer *eb)
 	return eb_relocate_slow(eb);
 }

-static void eb_export_fence(struct i915_vma *vma,
-			    struct i915_request *rq,
-			    unsigned int flags)
-{
-	struct reservation_object *resv = vma->resv;
-
-	/*
-	 * Ignore errors from failing to allocate the new fence, we can't
-	 * handle an error right now. Worst case should be missed
-	 * synchronisation leading to rendering corruption.
-	 */
-	reservation_object_lock(resv, NULL);
-	if (flags & EXEC_OBJECT_WRITE)
-		reservation_object_add_excl_fence(resv, &rq->fence);
-	else if (reservation_object_reserve_shared(resv) == 0)
-		reservation_object_add_shared_fence(resv, &rq->fence);
-	reservation_object_unlock(resv);
-}
-
 static int eb_move_to_gpu(struct i915_execbuffer *eb)
 {
 	const unsigned int count = eb->buffer_count;
@@ -1844,7 +1819,6 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 		struct i915_vma *vma = eb->vma[i];

 		i915_vma_move_to_active(vma, eb->request, flags);
-		eb_export_fence(vma, eb->request, flags);

 		__eb_unreserve_vma(vma, flags);
 		vma->exec_flags = NULL;
@@ -1884,6 +1858,25 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 	return true;
 }

+static void export_fence(struct i915_vma *vma,
+			 struct i915_request *rq,
+			 unsigned int flags)
+{
+	struct reservation_object *resv = vma->resv;
+
+	/*
+	 * Ignore errors from failing to allocate the new fence, we can't
+	 * handle an error right now. Worst case should be missed
+	 * synchronisation leading to rendering corruption.
+	 */
+	reservation_object_lock(resv, NULL);
+	if (flags & EXEC_OBJECT_WRITE)
+		reservation_object_add_excl_fence(resv, &rq->fence);
+	else if (reservation_object_reserve_shared(resv) == 0)
+		reservation_object_add_shared_fence(resv, &rq->fence);
+	reservation_object_unlock(resv);
+}
+
 void i915_vma_move_to_active(struct i915_vma *vma,
 			     struct i915_request *rq,
 			     unsigned int flags)
@@ -1921,6 +1914,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
 		i915_gem_active_set(&vma->last_fence, rq);
+
+	export_fence(vma, rq, flags);
 }

 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
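Putting the two execbuffer hunks above together, the tail of i915_vma_move_to_active() now reads roughly as follows (a reconstruction from the diff, surrounding lines elided), which is why the selftest hunks below can simply drop their open-coded reservation updates:

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, rq);

	/* export_fence() adds rq->fence to vma->resv as an exclusive fence
	 * for writes, or as a shared fence otherwise.
	 */
	export_fence(vma, rq, flags);
}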
@@ -998,10 +998,6 @@ static int gpu_write(struct i915_vma *vma,
 	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

-	reservation_object_lock(vma->resv, NULL);
-	reservation_object_add_excl_fence(vma->resv, &rq->fence);
-	reservation_object_unlock(vma->resv);
-
 err_request:
 	i915_request_add(rq);
@@ -225,10 +225,6 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unpin(vma);

-	reservation_object_lock(obj->resv, NULL);
-	reservation_object_add_excl_fence(obj->resv, &rq->fence);
-	reservation_object_unlock(obj->resv);
-
 	i915_request_add(rq);

 	return 0;
@@ -178,10 +178,6 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unpin(vma);

-	reservation_object_lock(obj->resv, NULL);
-	reservation_object_add_excl_fence(obj->resv, &rq->fence);
-	reservation_object_unlock(obj->resv);
-
 	i915_request_add(rq);

 	return 0;
@@ -466,10 +466,6 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

-	reservation_object_lock(vma->resv, NULL);
-	reservation_object_add_excl_fence(vma->resv, &rq->fence);
-	reservation_object_unlock(vma->resv);
-
 	i915_request_add(rq);

 	i915_gem_object_set_active_reference(obj);
@@ -68,9 +68,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	intel_ring_advance(rq, cs);

 	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	reservation_object_lock(vma->resv, NULL);
-	reservation_object_add_excl_fence(vma->resv, &rq->fence);
-	reservation_object_unlock(vma->resv);

 	i915_gem_object_get(result);
 	i915_gem_object_set_active_reference(result);