Commit eafc2aa2 authored by Chris Wilson, committed by Jani Nikula

drm/i915/execlists: Enable timeslice on partial virtual engine dequeue

If we stop filling the ELSP due to an incompatible virtual engine
request, check if we should enable the timeslice on behalf of the queue.

This fixes the case where we are inspecting the last->next element when
we know that the last element is the last request in the execution queue,
and so decided we did not need to enable timeslicing despite the intent
to do so!

Fixes: 8ee36e04 ("drm/i915/execlists: Minimalistic timeslicing")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: <stable@vger.kernel.org> # v5.4+
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200306113012.3184606-1-chris@chris-wilson.co.uk
(cherry picked from commit 3df2deed)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent 1d61c5d7
@@ -1679,11 +1679,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
 	if (!intel_engine_has_timeslices(engine))
 		return false;

-	if (list_is_last(&rq->sched.link, &engine->active.requests))
-		return false;
-
-	hint = max(rq_prio(list_next_entry(rq, sched.link)),
-		   engine->execlists.queue_priority_hint);
+	hint = engine->execlists.queue_priority_hint;
+	if (!list_is_last(&rq->sched.link, &engine->active.requests))
+		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));

 	return hint >= effective_prio(rq);
 }
@@ -1725,6 +1723,18 @@ static void set_timeslice(struct intel_engine_cs *engine)
	set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
}
/*
 * start_timeslice() - arm the execlists timeslice timer on behalf of the
 * queue, so a waiting (e.g. virtual-engine) request gets a chance to run.
 *
 * Records the current queue_priority_hint as switch_priority_hint, then
 * (re)arms the execlists timer with timeslice(engine) unless the timer is
 * already pending.
 */
static void start_timeslice(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
/* Remember the queue priority we are timeslicing on behalf of. */
execlists->switch_priority_hint = execlists->queue_priority_hint;
/* Do not rewind a timeslice that is already ticking. */
if (timer_pending(&execlists->timer))
return;
set_timer_ms(&execlists->timer, timeslice(engine));
}
static void record_preemption(struct intel_engine_execlists *execlists)
{
	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
@@ -1888,11 +1898,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
			 * Even if ELSP[1] is occupied and not worthy
			 * of timeslices, our queue might be.
			 */
-			if (!execlists->timer.expires &&
-			    need_timeslice(engine, last))
-				set_timer_ms(&execlists->timer,
-					     timeslice(engine));
-
+			start_timeslice(engine);
			return;
		}
	}
@@ -1927,7 +1933,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)

		if (last && !can_merge_rq(last, rq)) {
			spin_unlock(&ve->base.active.lock);
-			return; /* leave this for another */
+			start_timeslice(engine);
+			return; /* leave this for another sibling */
		}

		ENGINE_TRACE(engine,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment