Commit 5f5800a7 authored by Chris Wilson

drm/i915: Push EMIT_INVALIDATE at request start to backends

Move the common engine->emit_flush(EMIT_INVALIDATE) back to the backends
(where it was once previously) as we seek to specialise it in future
patches.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181207090213.14352-1-chris@chris-wilson.co.uk
parent d8f50531
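
For orientation before the diff: the unconditional EMIT_INVALIDATE flush moves out of the common i915_request_alloc() path and into each backend's request_alloc hook, so later patches can specialise it per backend without touching the common path. Below is a minimal standalone sketch of that pattern; the struct layouts and names (engine, request, request_alloc_common) are illustrative stand-ins, not the driver's real definitions.

#include <stdio.h>

#define EMIT_INVALIDATE 0x1

struct request;

struct engine {
	int (*emit_flush)(struct request *rq, unsigned int mode);
	int (*request_alloc)(struct request *rq);
};

struct request {
	struct engine *engine;
};

static int emit_flush(struct request *rq, unsigned int mode)
{
	printf("emit_flush(mode=%#x)\n", mode);
	return 0;
}

static int backend_request_alloc(struct request *rq)
{
	int ret;

	/* After this commit: the backend performs the invalidate itself. */
	ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
	if (ret)
		return ret;

	printf("backend-specific request setup\n");
	return 0;
}

static int request_alloc_common(struct request *rq)
{
	/* The common path no longer knows about EMIT_INVALIDATE. */
	return rq->engine->request_alloc(rq);
}

int main(void)
{
	struct engine engine = { emit_flush, backend_request_alloc };
	struct request rq = { &engine };

	return request_alloc_common(&rq);
}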
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -719,11 +719,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 */
 	rq->head = rq->ring->emit;
 
-	/* Unconditionally invalidate GPU caches and TLBs. */
-	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
-	if (ret)
-		goto err_unwind;
-
 	ret = engine->request_alloc(rq);
 	if (ret)
 		goto err_unwind;
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1276,17 +1276,20 @@ static int execlists_request_alloc(struct i915_request *request)
 
 	GEM_BUG_ON(!request->hw_context->pin_count);
 
-	/* Flush enough space to reduce the likelihood of waiting after
+	/*
+	 * Flush enough space to reduce the likelihood of waiting after
 	 * we start building the request - in which case we will just
 	 * have to repeat work.
 	 */
 	request->reserved_space += EXECLISTS_REQUEST_SIZE;
 
-	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
 	if (ret)
 		return ret;
 
-	/* Note that after this point, we have committed to using
+	/*
+	 * Note that after this point, we have committed to using
 	 * this request as it is being used to both track the
 	 * state of engine initialisation and liveness of the
 	 * golden renderstate above. Think twice before you try
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1820,13 +1820,15 @@ static int ring_request_alloc(struct i915_request *request)
 
 	GEM_BUG_ON(!request->hw_context->pin_count);
 
-	/* Flush enough space to reduce the likelihood of waiting after
+	/*
+	 * Flush enough space to reduce the likelihood of waiting after
 	 * we start building the request - in which case we will just
 	 * have to repeat work.
 	 */
 	request->reserved_space += LEGACY_REQUEST_SIZE;
 
-	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
 	if (ret)
 		return ret;
 
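
Note that in both backends the explicit intel_ring_wait_for_space() call is dropped rather than kept alongside the new flush: emitting the flush reserves ring space itself (the emit_flush implementations start with intel_ring_begin(), which waits for space as needed), so the invalidate doubles as the wait. A standalone mock of that idea, with ring_begin() and ring_wait_for_space() as simplified stand-ins for the driver's actual helpers:

#include <stdio.h>

struct ring {
	int space;	/* bytes known to be free */
};

struct request {
	struct ring *ring;
};

static int ring_wait_for_space(struct ring *ring, int bytes)
{
	/* stand-in for the real wait: pretend the space became free */
	printf("waiting for %d bytes of ring space\n", bytes);
	ring->space = bytes;
	return 0;
}

/* Every flush emitter starts by reserving space, so the wait is implicit. */
static int ring_begin(struct request *rq, int dwords)
{
	return ring_wait_for_space(rq->ring, dwords * 4);
}

static int emit_flush_invalidate(struct request *rq)
{
	int ret;

	ret = ring_begin(rq, 4);
	if (ret)
		return ret;

	printf("emitting invalidate commands\n");
	return 0;
}

int main(void)
{
	struct ring ring = { 0 };
	struct request rq = { &ring };

	/* The backend no longer calls ring_wait_for_space() directly. */
	return emit_flush_invalidate(&rq);
}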