Commit 5f15c1e6 authored by Chris Wilson

drm/i915/guc: Use a local cancel_port_requests

Since execlists and the guc have diverged in their port tracking, we
cannot simply reuse the execlists cancellation code as it leads to
unbalanced reference counting. Use a local, simpler routine for the guc.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190812203626.3948-1-chris@chris-wilson.co.uk
parent 478ffad6
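For orientation before the hunks, here is a minimal sketch of the new GuC-local routine, assembled from the diff below (the comments are illustrative and not part of the patch). Each inflight port entry holds exactly the references taken by the GuC's own schedule_in(), so cancellation only needs to hand every entry back to the GuC's schedule_out(); the shared execlists helper releases references according to the execlists backend's accounting, which has diverged from the GuC's, hence the imbalance noted above.

/* Sketch of the GuC-local cancellation path, taken from the hunks below. */
static void
cancel_port_requests(struct intel_engine_execlists * const execlists)
{
	struct i915_request * const *port, *rq;

	/* The GuC backend only populates the inflight array, never pending. */
	for (port = execlists->active; (rq = *port); port++)
		schedule_out(rq); /* releases exactly what schedule_in() took */

	/* Clear the tracker: ->active now points at an empty inflight array. */
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));
}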
@@ -136,9 +136,6 @@ execlists_active(const struct intel_engine_execlists *execlists)
 	return READ_ONCE(*execlists->active);
 }
 
-void
-execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
-
 struct i915_request *
 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
...
@@ -1297,8 +1297,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	}
 }
 
-void
-execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
+static void
+cancel_port_requests(struct intel_engine_execlists * const execlists)
 {
 	struct i915_request * const *port, *rq;
@@ -2355,7 +2355,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 
 unwind:
 	/* Push back any incomplete requests for replay after the reset. */
-	execlists_cancel_port_requests(execlists);
+	cancel_port_requests(execlists);
 	__unwind_incomplete_requests(engine);
 }
...
@@ -517,11 +517,14 @@ static struct i915_request *schedule_in(struct i915_request *rq, int idx)
 {
 	trace_i915_request_in(rq, idx);
 
-	if (!rq->hw_context->inflight)
-		rq->hw_context->inflight = rq->engine;
-	intel_context_inflight_inc(rq->hw_context);
-	intel_gt_pm_get(rq->engine->gt);
+	/*
+	 * Currently we are not tracking the rq->context being inflight
+	 * (ce->inflight = rq->engine). It is only used by the execlists
+	 * backend at the moment, a similar counting strategy would be
+	 * required if we generalise the inflight tracking.
+	 */
+	intel_gt_pm_get(rq->engine->gt);
 
 	return i915_request_get(rq);
 }
@@ -529,10 +532,6 @@ static void schedule_out(struct i915_request *rq)
 {
 	trace_i915_request_out(rq);
 
-	intel_context_inflight_dec(rq->hw_context);
-	if (!intel_context_inflight_count(rq->hw_context))
-		rq->hw_context->inflight = NULL;
-
 	intel_gt_pm_put(rq->engine->gt);
 	i915_request_put(rq);
 }
@@ -556,6 +555,11 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 		last = NULL;
 	}
 
+	/*
+	 * We write directly into the execlists->inflight queue and don't use
+	 * the execlists->pending queue, as we don't have a distinct switch
+	 * event.
+	 */
 	port = first;
 	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
@@ -636,6 +640,19 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
 	__tasklet_disable_sync_once(&execlists->tasklet);
 }
 
+static void
+cancel_port_requests(struct intel_engine_execlists * const execlists)
+{
+	struct i915_request * const *port, *rq;
+
+	/* Note we are only using the inflight and not the pending queue */
+	for (port = execlists->active; (rq = *port); port++)
+		schedule_out(rq);
+
+	execlists->active =
+		memset(execlists->inflight, 0, sizeof(execlists->inflight));
+}
+
 static void guc_reset(struct intel_engine_cs *engine, bool stalled)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -644,7 +661,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
 
 	spin_lock_irqsave(&engine->active.lock, flags);
 
-	execlists_cancel_port_requests(execlists);
+	cancel_port_requests(execlists);
 
 	/* Push back any incomplete requests for replay after the reset. */
 	rq = execlists_unwind_incomplete_requests(execlists);
@@ -687,7 +704,7 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
 	spin_lock_irqsave(&engine->active.lock, flags);
 
 	/* Cancel the requests on the HW and clear the ELSP tracker. */
-	execlists_cancel_port_requests(execlists);
+	cancel_port_requests(execlists);
 
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->active.requests, sched.link) {
...
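To make the "unbalanced reference counting" point concrete: after this change, every reference taken when a request enters the GuC's port tracking is dropped exactly once, whether the request retires normally or is swept up by cancel_port_requests() during reset. A sketch of that pairing, using only the helpers visible in the hunks above (comments added for illustration):

/* One get per port entry on submission... */
static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
	trace_i915_request_in(rq, idx);

	intel_gt_pm_get(rq->engine->gt);	/* GT wakeref for the inflight entry */
	return i915_request_get(rq);		/* request reference for the inflight entry */
}

/* ...and exactly one put per port entry on completion or cancellation. */
static void schedule_out(struct i915_request *rq)
{
	trace_i915_request_out(rq);

	intel_gt_pm_put(rq->engine->gt);	/* balances schedule_in() */
	i915_request_put(rq);			/* balances schedule_in() */
}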