Commit 6c9a8cda authored by Chris Wilson, committed by Jani Nikula

drm/i915: Avoid lock dropping between rescheduling

Unlocking is dangerous. In this case we combine an early update to the
out-of-queue request, because we know that it will be inserted into the
correct FIFO priority-ordered slot when it becomes ready in the future.
However, given sufficient enthusiasm, it may become ready as we are
continuing to reschedule, and so may gazump the FIFO if we have since
dropped its spinlock. The result is that it may be executed too early,
before its dependencies.

v2: Move all work into the second phase over the topological sort. This
removes the shortcut on the out-of-rbtree request to ensure that we only
adjust its priority after adjusting all of its dependencies.
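
To make the race concrete, here is a minimal userspace sketch of the two locking schemes, using a pthread mutex in place of the engine timeline spinlock. All names in it (struct node, queue_insert(), reschedule_racy(), reschedule_fixed()) are hypothetical stand-ins, not the driver's API: the point is only that publishing the bumped priority and then dropping the lock before the requeue leaves a window in which a concurrent "request became ready" submission can enter the FIFO ahead of dependencies that have not yet been requeued, whereas doing all the work in one critical section (the v2 shape) closes that window.

	#include <pthread.h>

	struct node {
		int priority;
		int in_queue;		/* stands in for !RB_EMPTY_NODE() */
	};

	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

	static void queue_insert(struct node *n) { n->in_queue = 1; }	/* toy FIFO */
	static void queue_remove(struct node *n) { n->in_queue = 0; }

	/* Racy: the bumped priority is visible while the lock is dropped,
	 * so a concurrent submission may insert the node at its new
	 * priority before its dependencies have been requeued.
	 */
	static void reschedule_racy(struct node *n, int prio)
	{
		pthread_mutex_lock(&queue_lock);
		n->priority = prio;	/* early update, phase one */
		pthread_mutex_unlock(&queue_lock);

		/* <-- gazump window: submission may queue_insert(n) here */

		pthread_mutex_lock(&queue_lock);
		if (n->in_queue) {	/* requeue at the new priority */
			queue_remove(n);
			queue_insert(n);
		}
		pthread_mutex_unlock(&queue_lock);
	}

	/* Fixed (the v2 shape): priority update and requeue form one
	 * critical section, so nothing can observe the half-done state.
	 */
	static void reschedule_fixed(struct node *n, int prio)
	{
		pthread_mutex_lock(&queue_lock);
		if (prio > n->priority) {
			n->priority = prio;
			if (n->in_queue) {
				queue_remove(n);
				queue_insert(n);
			}
		}
		pthread_mutex_unlock(&queue_lock);
	}

In the driver the same effect is achieved by taking request->engine's timeline lock once and performing every priority update and rbtree reinsertion inside the second phase over the topological sort, as the diff below shows.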

Fixes: 20311bd3 ("drm/i915/scheduler: Execute requests in order of priorities")
Testcase: igt/gem_exec_whisper
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: <stable@vger.kernel.org> # v4.10+
Link: http://patchwork.freedesktop.org/patch/msgid/20170327202143.7972-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
(cherry picked from commit a79a524e)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent dd68f2ba
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -670,15 +670,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
 static struct intel_engine_cs *
 pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 {
-	struct intel_engine_cs *engine;
+	struct intel_engine_cs *engine =
+		container_of(pt, struct drm_i915_gem_request, priotree)->engine;
 
-	engine = container_of(pt,
-			      struct drm_i915_gem_request,
-			      priotree)->engine;
+	GEM_BUG_ON(!locked);
+
 	if (engine != locked) {
-		if (locked)
-			spin_unlock_irq(&locked->timeline->lock);
-		spin_lock_irq(&engine->timeline->lock);
+		spin_unlock(&locked->timeline->lock);
+		spin_lock(&engine->timeline->lock);
 	}
 
 	return engine;
@@ -686,7 +685,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 
 static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 {
-	struct intel_engine_cs *engine = NULL;
+	struct intel_engine_cs *engine;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
 	LIST_HEAD(dfs);
@@ -720,26 +719,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 	list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
 		struct i915_priotree *pt = dep->signaler;
 
-		list_for_each_entry(p, &pt->signalers_list, signal_link)
+		/* Within an engine, there can be no cycle, but we may
+		 * refer to the same dependency chain multiple times
+		 * (redundant dependencies are not eliminated) and across
+		 * engines.
+		 */
+		list_for_each_entry(p, &pt->signalers_list, signal_link) {
+			GEM_BUG_ON(p->signaler->priority < pt->priority);
 			if (prio > READ_ONCE(p->signaler->priority))
 				list_move_tail(&p->dfs_link, &dfs);
+		}
 
 		list_safe_reset_next(dep, p, dfs_link);
-		if (!RB_EMPTY_NODE(&pt->node))
-			continue;
-
-		engine = pt_lock_engine(pt, engine);
-
-		/* If it is not already in the rbtree, we can update the
-		 * priority inplace and skip over it (and its dependencies)
-		 * if it is referenced *again* as we descend the dfs.
-		 */
-		if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
-			pt->priority = prio;
-			list_del_init(&dep->dfs_link);
-		}
 	}
 
+	engine = request->engine;
+	spin_lock_irq(&engine->timeline->lock);
+
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
 		struct i915_priotree *pt = dep->signaler;
@@ -751,16 +747,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 		if (prio <= pt->priority)
 			continue;
 
-		GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
-
 		pt->priority = prio;
-		rb_erase(&pt->node, &engine->execlist_queue);
-		if (insert_request(pt, &engine->execlist_queue))
-			engine->execlist_first = &pt->node;
+		if (!RB_EMPTY_NODE(&pt->node)) {
+			rb_erase(&pt->node, &engine->execlist_queue);
+			if (insert_request(pt, &engine->execlist_queue))
+				engine->execlist_first = &pt->node;
+		}
 	}
 
-	if (engine)
-		spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline->lock);
 
 	/* XXX Do we need to preempt to make room for us and our deps? */
 }