Commit baa7c2cd authored by Chris Wilson

drm/i915: Refactor marking a request as EIO

When wedging the device, we cancel all outstanding requests and mark
them as EIO. Rather than duplicate the small function to do so between
each submission backend, export one.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Andi Shyti <andi.shyti@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210109163455.28466-3-chris@chris-wilson.co.uk
parent e3aabe31
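
For context, every submission backend's cancel path follows the same shape. Below is a minimal sketch of the caller pattern this patch converges on; example_reset_cancel() is a hypothetical stand-in for the real execlists, ring and mock callers changed in the hunks that follow.

/*
 * Minimal sketch of the consolidated cancel pattern (illustrative only;
 * example_reset_cancel() does not exist in the tree).
 */
static void example_reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	/* Cancel every submitted request still tracked by the engine. */
	list_for_each_entry(rq, &engine->active.requests, sched.link)
		i915_request_mark_eio(rq);

	/* Wake any waiters so they observe the -EIO fence error. */
	intel_engine_signal_breadcrumbs(engine);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}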
@@ -203,17 +203,6 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
 	return container_of(engine, struct virtual_engine, base);
 }
 
-static void mark_eio(struct i915_request *rq)
-{
-	if (__i915_request_is_complete(rq))
-		return;
-
-	GEM_BUG_ON(i915_request_signaled(rq));
-
-	i915_request_set_error_once(rq, -EIO);
-	i915_request_mark_complete(rq);
-}
-
 static struct i915_request *
 __active_request(const struct intel_timeline * const tl,
 		 struct i915_request *rq,
@@ -2996,7 +2985,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->active.requests, sched.link)
-		mark_eio(rq);
+		i915_request_mark_eio(rq);
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Flush the queued requests to the timeline list (for retiring). */
@@ -3005,7 +2994,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
-			mark_eio(rq);
+			i915_request_mark_eio(rq);
 			__i915_request_submit(rq);
 		}
 
@@ -3015,7 +3004,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 
 	/* On-hold requests will be flushed to timeline upon their release */
 	list_for_each_entry(rq, &engine->active.hold, sched.link)
-		mark_eio(rq);
+		i915_request_mark_eio(rq);
 
 	/* Cancel all attached virtual engines */
 	while ((rb = rb_first_cached(&execlists->virtual))) {
@@ -3028,7 +3017,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 		spin_lock(&ve->base.active.lock);
 		rq = fetch_and_zero(&ve->request);
 		if (rq) {
-			mark_eio(rq);
+			i915_request_mark_eio(rq);
 
 			rq->engine = engine;
 			__i915_request_submit(rq);
...
@@ -473,10 +473,8 @@ static void reset_cancel(struct intel_engine_cs *engine)
 	spin_lock_irqsave(&engine->active.lock, flags);
 
 	/* Mark all submitted requests as skipped. */
-	list_for_each_entry(request, &engine->active.requests, sched.link) {
-		i915_request_set_error_once(request, -EIO);
-		i915_request_mark_complete(request);
-	}
+	list_for_each_entry(request, &engine->active.requests, sched.link)
+		i915_request_mark_eio(request);
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
...
@@ -245,17 +245,6 @@ static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	GEM_BUG_ON(stalled);
 }
 
-static void mark_eio(struct i915_request *rq)
-{
-	if (i915_request_completed(rq))
-		return;
-
-	GEM_BUG_ON(i915_request_signaled(rq));
-
-	i915_request_set_error_once(rq, -EIO);
-	i915_request_mark_complete(rq);
-}
-
 static void mock_reset_cancel(struct intel_engine_cs *engine)
 {
 	struct mock_engine *mock =
@@ -269,12 +258,12 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
 
 	/* Mark all submitted requests as skipped. */
 	list_for_each_entry(rq, &engine->active.requests, sched.link)
-		mark_eio(rq);
+		i915_request_mark_eio(rq);
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Cancel and submit all pending requests. */
 	list_for_each_entry(rq, &mock->hw_queue, mock.link) {
-		mark_eio(rq);
+		i915_request_mark_eio(rq);
 		__i915_request_submit(rq);
 	}
 	INIT_LIST_HEAD(&mock->hw_queue);
...
@@ -515,6 +515,17 @@ void i915_request_set_error_once(struct i915_request *rq, int error)
 	} while (!try_cmpxchg(&rq->fence.error, &old, error));
 }
 
+void i915_request_mark_eio(struct i915_request *rq)
+{
+	if (__i915_request_is_complete(rq))
+		return;
+
+	GEM_BUG_ON(i915_request_signaled(rq));
+
+	i915_request_set_error_once(rq, -EIO);
+	i915_request_mark_complete(rq);
+}
+
 bool __i915_request_submit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
...
@@ -309,8 +309,9 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
 struct i915_request * __must_check
 i915_request_create(struct intel_context *ce);
 
-void i915_request_set_error_once(struct i915_request *rq, int error);
 void __i915_request_skip(struct i915_request *rq);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void i915_request_mark_eio(struct i915_request *rq);
 
 struct i915_request *__i915_request_commit(struct i915_request *request);
 void __i915_request_queue(struct i915_request *rq,
...
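
Note the ordering inside the exported helper: the fence error is set via i915_request_set_error_once() before i915_request_mark_complete(), so a waiter woken by the completed fence observes -EIO rather than success, and GEM_BUG_ON(i915_request_signaled(rq)) asserts that an already-signalled request is never retroactively marked as cancelled.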