Commit 70d6894d authored by Chris Wilson

drm/i915: Serialize against vma moves

Make sure that when submitting requests, we always serialize against
potential vma moves and clflushes.

Time for an i915_request_await_vma() interface!
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190819112033.30638-1-chris@chris-wilson.co.uk
parent a1e37b02
@@ -1192,7 +1192,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto skip_request;
 
 	i915_vma_lock(batch);
-	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
-	err = i915_vma_move_to_active(batch, rq, 0);
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
@@ -106,6 +106,8 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
 	int err;
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, 0);
+	err = i915_request_await_object(rq, vma->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, 0);
 	i915_vma_unlock(vma);
 	if (unlikely(err))
@@ -171,6 +173,8 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 	}
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (unlikely(err))
@@ -228,6 +228,8 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 	intel_ring_advance(rq, cs);
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	i915_vma_unpin(vma);
@@ -666,12 +666,16 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 		goto err_request;
 
 	i915_vma_lock(batch);
-	err = i915_vma_move_to_active(batch, rq, 0);
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (err)
@@ -1218,6 +1222,8 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 		goto err_request;
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, 0);
+	err = i915_request_await_object(rq, vma->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, 0);
 	i915_vma_unlock(vma);
 	if (err)
@@ -1315,6 +1321,8 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 		goto err_request;
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (err)
@@ -351,7 +351,10 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 	}
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq,
+					      EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 
 	i915_request_add(rq);
@@ -139,12 +139,16 @@ int igt_gpu_fill_dw(struct i915_vma *vma,
 		goto err_request;
 
 	i915_vma_lock(batch);
-	err = i915_vma_move_to_active(batch, rq, 0);
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (err)
@@ -222,6 +222,8 @@ int intel_renderstate_emit(struct i915_request *rq)
 	}
 
 	i915_vma_lock(so.vma);
-	err = i915_vma_move_to_active(so.vma, rq, 0);
+	err = i915_request_await_object(rq, so.vma->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(so.vma, rq, 0);
 	i915_vma_unlock(so.vma);
 err_unpin:
@@ -118,6 +118,9 @@ static int move_to_active(struct i915_vma *vma,
 	int err;
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, flags);
+	err = i915_request_await_object(rq, vma->obj,
+					flags & EXEC_OBJECT_WRITE);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, flags);
 	i915_vma_unlock(vma);
@@ -1237,6 +1240,9 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 	}
 
 	i915_vma_lock(arg.vma);
-	err = i915_vma_move_to_active(arg.vma, rq, flags);
+	err = i915_request_await_object(rq, arg.vma->obj,
+					flags & EXEC_OBJECT_WRITE);
+	if (err == 0)
+		err = i915_vma_move_to_active(arg.vma, rq, flags);
 	i915_vma_unlock(arg.vma);
@@ -1459,11 +1459,13 @@ static int smoke_submit(struct preempt_smoke *smoke,
 	if (vma) {
 		i915_vma_lock(vma);
-		err = rq->engine->emit_bb_start(rq,
-						vma->node.start,
-						PAGE_SIZE, 0);
-		if (!err)
-			err = i915_vma_move_to_active(vma, rq, 0);
+		err = i915_request_await_object(rq, vma->obj, false);
+		if (!err)
+			err = i915_vma_move_to_active(vma, rq, 0);
+		if (!err)
+			err = rq->engine->emit_bb_start(rq,
+							vma->node.start,
+							PAGE_SIZE, 0);
 		i915_vma_unlock(vma);
 	}
@@ -113,6 +113,8 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	}
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (err)
@@ -876,6 +876,8 @@ static int live_all_engines(void *arg)
 		request[id]->batch = batch;
 
 		i915_vma_lock(batch);
-		err = i915_vma_move_to_active(batch, request[id], 0);
+		err = i915_request_await_object(request[id], batch->obj, 0);
+		if (err == 0)
+			err = i915_vma_move_to_active(batch, request[id], 0);
 		i915_vma_unlock(batch);
 		GEM_BUG_ON(err);