Commit 60ef5b7a authored by Chris Wilson

drm/i915/execlists: Track active elements during dequeue

Record the initial active element we use when building the next ELSP
submission, so that we can compare against it later to see if there's
no change.

Fixes: 44d0a9c0 ("drm/i915/execlists: Skip redundant resubmission")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200311092624.10012-2-chris@chris-wilson.co.uk
parent 408464b4
...@@ -1678,17 +1678,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve, ...@@ -1678,17 +1678,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
spin_unlock(&old->breadcrumbs.irq_lock); spin_unlock(&old->breadcrumbs.irq_lock);
} }
/*
 * last_active - return the first not-yet-completed request in the
 * engine's active array, or NULL if every tracked request has completed.
 *
 * Walks execlists->active, skipping entries whose requests have already
 * completed; the loop terminates on a NULL entry, so the array is
 * presumably NULL-terminated — confirm against the execlists definition.
 */
static struct i915_request *
last_active(const struct intel_engine_execlists *execlists)
{
/* READ_ONCE: active is read locklessly; NOTE(review) it appears to be
 * updated concurrently elsewhere — confirm against the submission path. */
struct i915_request * const *last = READ_ONCE(execlists->active);
/* Skip entries already completed; stop at the first live one (or NULL). */
while (*last && i915_request_completed(*last))
last++;
return *last;
}
#define for_each_waiter(p__, rq__) \ #define for_each_waiter(p__, rq__) \
list_for_each_entry_lockless(p__, \ list_for_each_entry_lockless(p__, \
&(rq__)->sched.waiters_list, \ &(rq__)->sched.waiters_list, \
...@@ -1827,11 +1816,9 @@ static void record_preemption(struct intel_engine_execlists *execlists) ...@@ -1827,11 +1816,9 @@ static void record_preemption(struct intel_engine_execlists *execlists)
(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
} }
static unsigned long active_preempt_timeout(struct intel_engine_cs *engine) static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
const struct i915_request *rq)
{ {
struct i915_request *rq;
rq = last_active(&engine->execlists);
if (!rq) if (!rq)
return 0; return 0;
...@@ -1842,13 +1829,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine) ...@@ -1842,13 +1829,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
return READ_ONCE(engine->props.preempt_timeout_ms); return READ_ONCE(engine->props.preempt_timeout_ms);
} }
static void set_preempt_timeout(struct intel_engine_cs *engine) static void set_preempt_timeout(struct intel_engine_cs *engine,
const struct i915_request *rq)
{ {
if (!intel_engine_has_preempt_reset(engine)) if (!intel_engine_has_preempt_reset(engine))
return; return;
set_timer_ms(&engine->execlists.preempt, set_timer_ms(&engine->execlists.preempt,
active_preempt_timeout(engine)); active_preempt_timeout(engine, rq));
} }
static inline void clear_ports(struct i915_request **ports, int count) static inline void clear_ports(struct i915_request **ports, int count)
...@@ -1861,6 +1849,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) ...@@ -1861,6 +1849,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists; struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port = execlists->pending; struct i915_request **port = execlists->pending;
struct i915_request ** const last_port = port + execlists->port_mask; struct i915_request ** const last_port = port + execlists->port_mask;
struct i915_request * const *active;
struct i915_request *last; struct i915_request *last;
struct rb_node *rb; struct rb_node *rb;
bool submit = false; bool submit = false;
...@@ -1915,7 +1904,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine) ...@@ -1915,7 +1904,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* i.e. we will retrigger preemption following the ack in case * i.e. we will retrigger preemption following the ack in case
* of trouble. * of trouble.
*/ */
last = last_active(execlists); active = READ_ONCE(execlists->active);
while ((last = *active) && i915_request_completed(last))
active++;
if (last) { if (last) {
if (need_preempt(engine, last, rb)) { if (need_preempt(engine, last, rb)) {
ENGINE_TRACE(engine, ENGINE_TRACE(engine,
...@@ -2201,7 +2193,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) ...@@ -2201,7 +2193,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* Skip if we ended up with exactly the same set of requests, * Skip if we ended up with exactly the same set of requests,
* e.g. trying to timeslice a pair of ordered contexts * e.g. trying to timeslice a pair of ordered contexts
*/ */
if (!memcmp(execlists->active, execlists->pending, if (!memcmp(active, execlists->pending,
(port - execlists->pending + 1) * sizeof(*port))) { (port - execlists->pending + 1) * sizeof(*port))) {
do do
execlists_schedule_out(fetch_and_zero(port)); execlists_schedule_out(fetch_and_zero(port));
...@@ -2212,7 +2204,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) ...@@ -2212,7 +2204,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
clear_ports(port + 1, last_port - port); clear_ports(port + 1, last_port - port);
execlists_submit_ports(engine); execlists_submit_ports(engine);
set_preempt_timeout(engine); set_preempt_timeout(engine, *active);
} else { } else {
skip_submit: skip_submit:
ring_set_paused(engine, 0); ring_set_paused(engine, 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment