Commit 317e0395 authored by Chris Wilson, committed by Jani Nikula

drm/i915/execlists: Take a reference while capturing the guilty request

Thanks to preempt-to-busy, we leave the request on the HW as we submit
the preemption request. This means that the request may complete at any
moment as we process HW events, and in particular the request may be
retired as we are planning to capture it for a preemption timeout.

Be more careful while obtaining the request to capture after a
preemption timeout, and check whether it completed before we were able
to put it on the on-hold list. If we see that it did complete just
before we captured it, proclaim the preemption timeout a false positive
and pardon the reset, as we should hit an arbitration point momentarily
and so be able to process the preemption.
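
Concretely, execlists_hold() now re-checks for completion under the
engine's active lock and reports whether the hold actually took effect.
A simplified excerpt of the new shape (the full hunk is in the diff
below; the transfer-to-hold bookkeeping is elided):

	static bool execlists_hold(struct intel_engine_cs *engine,
				   struct i915_request *rq)
	{
		spin_lock_irq(&engine->active.lock);

		if (i915_request_completed(rq)) { /* too late! */
			rq = NULL; /* the timeout was a false positive */
			goto unlock;
		}

		/* ... transfer rq and its waiters onto the hold queue ... */
		__execlists_hold(rq);

	unlock:
		spin_unlock_irq(&engine->active.lock);
		return rq; /* NULL (false) if the request beat us to it */
	}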

Note that even after we move the request onto the hold list it may be
retired (as the reset to stop the HW comes afterwards), so we must hold
our own reference while we work on the request for capture (and all of
the peeking at state within the request needs to be carefully
protected).
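
To that end, execlists_capture() pins the request under the RCU read
lock before peeking at it; simplified from the hunk below
(i915_request_get_rcu() returns NULL if the request's refcount has
already dropped to zero, i.e. it is mid-free):

	rcu_read_lock();
	cap->rq = active_request(cap->rq->context->timeline, cap->rq);
	cap->rq = i915_request_get_rcu(cap->rq); /* may race with retire */
	rcu_read_unlock();
	if (!cap->rq)
		goto err_free; /* already retired: pardon the reset */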

Fixes: c3f1ed90 ("drm/i915/gt: Allow temporary suspension of inflight requests")
Closes: https://gitlab.freedesktop.org/drm/intel/issues/997
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122140243.495621-1-chris@chris-wilson.co.uk
(cherry picked from commit 4ba5c086)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent ad18ba7b
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2393,11 +2393,16 @@ static void __execlists_hold(struct i915_request *rq)
 	} while (rq);
 }
 
-static void execlists_hold(struct intel_engine_cs *engine,
+static bool execlists_hold(struct intel_engine_cs *engine,
 			   struct i915_request *rq)
 {
 	spin_lock_irq(&engine->active.lock);
 
+	if (i915_request_completed(rq)) { /* too late! */
+		rq = NULL;
+		goto unlock;
+	}
+
 	/*
 	 * Transfer this request onto the hold queue to prevent it
 	 * being resumbitted to HW (and potentially completed) before we have
@@ -2408,7 +2413,9 @@ static void execlists_hold(struct intel_engine_cs *engine,
 	GEM_BUG_ON(rq->engine != engine);
 	__execlists_hold(rq);
 
+unlock:
 	spin_unlock_irq(&engine->active.lock);
+	return rq;
 }
 
 static bool hold_request(const struct i915_request *rq)
@@ -2524,6 +2531,7 @@ static void execlists_capture_work(struct work_struct *work)
 
 	/* Return this request and all that depend upon it for signaling */
 	execlists_unhold(engine, cap->rq);
+	i915_request_put(cap->rq);
 
 	kfree(cap);
 }
@@ -2560,12 +2568,12 @@ static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
 	return NULL;
 }
 
-static void execlists_capture(struct intel_engine_cs *engine)
+static bool execlists_capture(struct intel_engine_cs *engine)
 {
 	struct execlists_capture *cap;
 
 	if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
-		return;
+		return true;
 
 	/*
 	 * We need to _quickly_ capture the engine state before we reset.
@@ -2574,13 +2582,17 @@ static void execlists_capture(struct intel_engine_cs *engine)
 	 */
 	cap = capture_regs(engine);
 	if (!cap)
-		return;
+		return true;
 
 	cap->rq = execlists_active(&engine->execlists);
 	GEM_BUG_ON(!cap->rq);
 
+	rcu_read_lock();
 	cap->rq = active_request(cap->rq->context->timeline, cap->rq);
-	GEM_BUG_ON(!cap->rq);
+	cap->rq = i915_request_get_rcu(cap->rq);
+	rcu_read_unlock();
+	if (!cap->rq)
+		goto err_free;
 
 	/*
 	 * Remove the request from the execlists queue, and take ownership
@@ -2602,10 +2614,19 @@ static void execlists_capture(struct intel_engine_cs *engine)
 	 * simply hold that request accountable for being non-preemptible
 	 * long enough to force the reset.
 	 */
-	execlists_hold(engine, cap->rq);
+	if (!execlists_hold(engine, cap->rq))
+		goto err_rq;
 
 	INIT_WORK(&cap->work, execlists_capture_work);
 	schedule_work(&cap->work);
+	return true;
+
+err_rq:
+	i915_request_put(cap->rq);
+err_free:
+	i915_gpu_coredump_put(cap->error);
+	kfree(cap);
+	return false;
 }
 
 static noinline void preempt_reset(struct intel_engine_cs *engine)
@@ -2627,8 +2648,10 @@ static noinline void preempt_reset(struct intel_engine_cs *engine)
 		     jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
 
 	ring_set_paused(engine, 1); /* Freeze the current request in place */
-	execlists_capture(engine);
-	intel_engine_reset(engine, "preemption time out");
+	if (execlists_capture(engine))
+		intel_engine_reset(engine, "preemption time out");
+	else
+		ring_set_paused(engine, 0);
 
 	tasklet_enable(&engine->execlists.tasklet);
 	clear_and_wake_up_bit(bit, lock);
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -335,7 +335,6 @@ static int live_hold_reset(void *arg)
 
 		if (test_and_set_bit(I915_RESET_ENGINE + id,
 				     &gt->reset.flags)) {
-			spin_unlock_irq(&engine->active.lock);
 			intel_gt_set_wedged(gt);
 			err = -EBUSY;
 			goto out;
@@ -345,6 +344,7 @@ static int live_hold_reset(void *arg)
 		engine->execlists.tasklet.func(engine->execlists.tasklet.data);
 		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
+		i915_request_get(rq);
 		execlists_hold(engine, rq);
 		GEM_BUG_ON(!i915_request_on_hold(rq));
 
@@ -356,7 +356,6 @@ static int live_hold_reset(void *arg)
 				      &gt->reset.flags);
 
 		/* Check that we do not resubmit the held request */
-		i915_request_get(rq);
 		if (!i915_request_wait(rq, 0, HZ / 5)) {
 			pr_err("%s: on hold request completed!\n",
 			       engine->name);