Commit 4cc79cbb authored by Chris Wilson

drm/i915/execlists: Drop promotion on unsubmit

With the disappearance of NEWCLIENT, we no longer need to provide the
priority boost on preemption in order to prevent repeated gazumping,
and we can remove the dead code.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190515130052.4475-5-chris@chris-wilson.co.uk
parent 68fc728b
@@ -164,8 +164,6 @@
 #define WA_TAIL_DWORDS 2
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
 
-#define ACTIVE_PRIORITY (I915_PRIORITY_NOSEMAPHORE)
-
 static int execlists_context_deferred_alloc(struct intel_context *ce,
 					    struct intel_engine_cs *engine);
 static void execlists_init_reg_state(u32 *reg_state,
@@ -189,23 +187,12 @@ static int effective_prio(const struct i915_request *rq)
 
 	/*
 	 * On unwinding the active request, we give it a priority bump
-	 * equivalent to a freshly submitted request. This protects it from
-	 * being gazumped again, but it would be preferable if we didn't
-	 * let it be gazumped in the first place!
-	 *
-	 * See __unwind_incomplete_requests()
+	 * if it has completed waiting on any semaphore. If we know that
+	 * the request has already started, we can prevent an unwanted
+	 * preempt-to-idle cycle by taking that into account now.
 	 */
-	if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) {
-		/*
-		 * After preemption, we insert the active request at the
-		 * end of the new priority level. This means that we will be
-		 * _lower_ priority than the preemptee all things equal (and
-		 * so the preemption is valid), so adjust our comparison
-		 * accordingly.
-		 */
-		prio |= ACTIVE_PRIORITY;
-		prio--;
-	}
+	if (__i915_request_has_started(rq))
+		prio |= I915_PRIORITY_NOSEMAPHORE;
 
 	/* Restrict mere WAIT boosts from triggering preemption */
 	return prio | __NO_PREEMPTION;
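
An aside on the conditional deleted above: `~prio & ACTIVE_PRIORITY` is the usual bitmask test for "flag bits not yet set", which made the old boost idempotent. A minimal standalone sketch of the idiom, where BOOST is a made-up flag bit for illustration only (the real values, such as I915_PRIORITY_NOSEMAPHORE, are internal i915 priority bits):

#include <stdio.h>

/* BOOST is a hypothetical flag bit for this demo, not an i915 value. */
#define BOOST (1 << 0)

int main(void)
{
	int prio = 0;

	/* "~prio & BOOST" is non-zero only while the flag is still clear,
	 * so the boost below is applied at most once. */
	if (~prio & BOOST)
		prio |= BOOST;

	/* A second pass sees the bit already set and skips the boost. */
	printf("second boost needed? %s\n", (~prio & BOOST) ? "yes" : "no");
	return 0;
}
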
@@ -371,11 +358,11 @@ static void unwind_wa_tail(struct i915_request *rq)
 }
 
 static struct i915_request *
-__unwind_incomplete_requests(struct intel_engine_cs *engine, int boost)
+__unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
 	struct i915_request *rq, *rn, *active = NULL;
 	struct list_head *uninitialized_var(pl);
-	int prio = I915_PRIORITY_INVALID | boost;
+	int prio = I915_PRIORITY_INVALID;
 
 	lockdep_assert_held(&engine->timeline.lock);
@@ -402,31 +389,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine, int boost)
 		active = rq;
 	}
 
-	/*
-	 * The active request is now effectively the start of a new client
-	 * stream, so give it the equivalent small priority bump to prevent
-	 * it being gazumped a second time by another peer.
-	 *
-	 * Note we have to be careful not to apply a priority boost to a request
-	 * still spinning on its semaphores. If the request hasn't started, that
-	 * means it is still waiting for its dependencies to be signaled, and
-	 * if we apply a priority boost to this request, we will boost it past
-	 * its signalers and so break PI.
-	 *
-	 * One consequence of this preemption boost is that we may jump
-	 * over lesser priorities (such as I915_PRIORITY_WAIT), effectively
-	 * making those priorities non-preemptible. They will be moved forward
-	 * in the priority queue, but they will not gain immediate access to
-	 * the GPU.
-	 */
-	if (~prio & boost && __i915_request_has_started(active)) {
-		prio |= boost;
-		GEM_BUG_ON(active->sched.attr.priority >= prio);
-		active->sched.attr.priority = prio;
-		list_move_tail(&active->sched.link,
-			       i915_sched_lookup_priolist(engine, prio));
-	}
-
 	return active;
 }
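
The deleted block's `list_move_tail()` call is what the old effective_prio() comment alluded to: the boosted request re-entered its new priority level at the tail, behind everything already queued there. A userspace sketch of that behaviour, with toy re-implementations of the kernel list primitives (the names mirror include/linux/list.h, but this is an illustration, not the kernel code):

#include <stdio.h>

/* Toy versions of the kernel's circular doubly-linked list primitives,
 * reimplemented here just for the demo. */
struct list_head {
	struct list_head *prev, *next;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->prev = h->next = h;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

/* Unlink an entry and append it at the tail of another list: the boosted
 * request became the *last* entry of its new priority level, which is why
 * the old effective_prio() also subtracted one before comparing. */
static void list_move_tail(struct list_head *e, struct list_head *head)
{
	list_del(e);
	list_add_tail(e, head);
}

struct request {
	int id;
	struct list_head link;
};

int main(void)
{
	struct list_head old_level, new_level;
	struct request rq = { .id = 1 };

	INIT_LIST_HEAD(&old_level);
	INIT_LIST_HEAD(&new_level);
	list_add_tail(&rq.link, &old_level);

	list_move_tail(&rq.link, &new_level);
	printf("rq%d is last in its new level: %s\n", rq.id,
	       new_level.prev == &rq.link ? "yes" : "no");
	return 0;
}
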
@@ -436,7 +398,7 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
 	struct intel_engine_cs *engine =
 		container_of(execlists, typeof(*engine), execlists);
 
-	return __unwind_incomplete_requests(engine, 0);
+	return __unwind_incomplete_requests(engine);
 }
 
 static inline void
@@ -657,8 +619,7 @@ static void complete_preempt_context(struct intel_engine_execlists *execlists)
 	execlists_cancel_port_requests(execlists);
 	__unwind_incomplete_requests(container_of(execlists,
 						  struct intel_engine_cs,
-						  execlists),
-				     ACTIVE_PRIORITY);
+						  execlists));
 }
 
 static void execlists_dequeue(struct intel_engine_cs *engine)
@@ -1911,7 +1872,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	execlists_cancel_port_requests(execlists);
 
 	/* Push back any incomplete requests for replay after the reset. */
-	rq = __unwind_incomplete_requests(engine, 0);
+	rq = __unwind_incomplete_requests(engine);
 	if (!rq)
 		goto out_replay;