Commit 7904e081 authored by Chris Wilson

drm/i915/gt: Cancel submitted requests upon context reset

Since we process schedule-in of a context after submitting the request,
if we decide to reset the context at that time, we also have to cancel
the requests we have marked for submission.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201230220028.17089-1-chris@chris-wilson.co.uk
parent cecb2af4
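
For context, the change can be modelled with the small standalone sketch below: starting from the hanging request, walk back along the timeline, mark every still-incomplete request with the error and skip its payload, and return the oldest such request so the ring can be rewound to its head. All names in the sketch (struct request, cancel_incomplete, set_error_once, skip_payload) are invented for illustration only; the real implementation is __active_request() and __i915_request_skip() in the diff below.

	/* Illustrative model only -- not the i915 API. */
	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct request {
		int seqno;
		bool complete;
		int error;
		struct request *prev;	/* older request on the same timeline */
	};

	static void set_error_once(struct request *rq, int error)
	{
		if (!rq->error)
			rq->error = error;
	}

	static void skip_payload(struct request *rq)
	{
		/* stand-in for __i915_request_skip(): drop the user payload */
		printf("request %d: payload skipped (error %d)\n",
		       rq->seqno, rq->error);
	}

	/*
	 * Walk backwards from the hanging request: every earlier request
	 * that has not yet completed is also doomed by the context reset,
	 * so mark it with the error and skip it. Return the oldest
	 * incomplete request, whose head the caller uses to rewind the ring.
	 */
	static struct request *cancel_incomplete(struct request *rq, int error)
	{
		struct request *active = rq;

		for (; rq; rq = rq->prev) {
			if (rq->complete)
				break;

			if (error) {
				set_error_once(rq, error);
				skip_payload(rq);
			}
			active = rq;
		}

		return active;
	}

	int main(void)
	{
		struct request r1 = { .seqno = 1, .complete = true };
		struct request r2 = { .seqno = 2, .prev = &r1 };
		struct request r3 = { .seqno = 3, .prev = &r2 }; /* hanging */

		struct request *oldest = cancel_incomplete(&r3, -EIO);

		printf("rewind to request %d\n", oldest->seqno);
		return 0;
	}

This mirrors how reset_active() now calls __active_request(ce->timeline, rq, -EIO) so that the submitted-but-incomplete requests are cancelled rather than replayed.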
@@ -215,22 +215,32 @@ static void mark_eio(struct i915_request *rq)
 }
 
 static struct i915_request *
-active_request(const struct intel_timeline * const tl, struct i915_request *rq)
+__active_request(const struct intel_timeline * const tl,
+		 struct i915_request *rq,
+		 int error)
 {
 	struct i915_request *active = rq;
 
-	rcu_read_lock();
-	list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
+	list_for_each_entry_from_reverse(rq, &tl->requests, link) {
 		if (__i915_request_is_complete(rq))
 			break;
 
+		if (error) {
+			i915_request_set_error_once(rq, error);
+			__i915_request_skip(rq);
+		}
 		active = rq;
 	}
-	rcu_read_unlock();
 
 	return active;
 }
 
+static struct i915_request *
+active_request(const struct intel_timeline * const tl, struct i915_request *rq)
+{
+	return __active_request(tl, rq, 0);
+}
+
 static inline void
 ring_set_paused(const struct intel_engine_cs *engine, int state)
 {
@@ -487,14 +497,14 @@ static void reset_active(struct i915_request *rq,
 	 * remain correctly ordered. And we defer to __i915_request_submit()
 	 * so that all asynchronous waits are correctly handled.
 	 */
-	ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
+	ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n",
 		     rq->fence.context, rq->fence.seqno);
 
 	/* On resubmission of the active request, payload will be scrubbed */
 	if (__i915_request_is_complete(rq))
 		head = rq->tail;
 	else
-		head = active_request(ce->timeline, rq)->head;
+		head = __active_request(ce->timeline, rq, -EIO)->head;
 	head = intel_ring_wrap(ce->ring, head);
 
 	/* Scrub the context image to prevent replaying the previous batch */
@@ -490,6 +490,8 @@ void __i915_request_skip(struct i915_request *rq)
 	if (rq->infix == rq->postfix)
 		return;
 
+	RQ_TRACE(rq, "error: %d\n", rq->fence.error);
+
 	/*
 	 * As this request likely depends on state from the lost
 	 * context, clear out all the user operations leaving the