Commit 25d851ad authored by Chris Wilson

drm/i915: Only reschedule the submission tasklet if preemption is possible

If we couple the scheduler more tightly with the execlists policy, we
can apply the preemption policy to the question of whether we need to
kick the tasklet at all for this priority bump.

v2: Rephrase it as a core i915 policy and not an execlists foible.
v3: Pull the kick together.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190507122544.12698-1-chris@chris-wilson.co.uk
parent 39705649
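
[Editor's note] The policy being consolidated is the i915_scheduler_need_preempt() predicate, moved into i915_scheduler.h in the diff below. As a rough illustration of its behaviour, here is a minimal standalone C sketch, not driver code: it models I915_PRIORITY_NORMAL as 0 (user priorities range either side of zero) and substitutes a plain helper for the kernel's max() macro.

/*
 * Standalone sketch (not driver code) of the preemption policy.
 * Assumptions: I915_PRIORITY_NORMAL modelled as 0; max_int() stands
 * in for the kernel's max() macro.
 */
#include <stdbool.h>
#include <stdio.h>

#define I915_PRIORITY_NORMAL 0

static int max_int(int a, int b)
{
	return a > b ? a : b;
}

/* Mirrors i915_scheduler_need_preempt(): the strict '>' preserves
 * FIFO ordering between requests of equal priority. */
static bool need_preempt(int prio, int active)
{
	return prio > max_int(I915_PRIORITY_NORMAL - 1, active);
}

int main(void)
{
	printf("low vs low:       %d\n", need_preempt(-1, -1)); /* 0: no preemption */
	printf("normal vs low:    %d\n", need_preempt(0, -1));  /* 1: preempt */
	printf("normal vs normal: %d\n", need_preempt(0, 0));   /* 0: keep FIFO */
	printf("high vs normal:   %d\n", need_preempt(1, 0));   /* 1: preempt */
	return 0;
}

The strict '>' (rather than '>=') is what keeps equal-priority requests running in FIFO order of their dependencies, per the comment in the diff.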
...
@@ -106,24 +106,6 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
 
 void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
 
-static inline bool __execlists_need_preempt(int prio, int last)
-{
-	/*
-	 * Allow preemption of low -> normal -> high, but we do
-	 * not allow low priority tasks to preempt other low priority
-	 * tasks under the impression that latency for low priority
-	 * tasks does not matter (as much as background throughput),
-	 * so kiss.
-	 *
-	 * More naturally we would write
-	 *	prio >= max(0, last);
-	 * except that we wish to prevent triggering preemption at the same
-	 * priority level: the task that is running should remain running
-	 * to preserve FIFO ordering of dependencies.
-	 */
-	return prio > max(I915_PRIORITY_NORMAL - 1, last);
-}
-
 static inline void
 execlists_set_active(struct intel_engine_execlists *execlists,
		      unsigned int bit)
...
...
@@ -252,8 +252,8 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
	 * ourselves, ignore the request.
	 */
	last_prio = effective_prio(rq);
-	if (!__execlists_need_preempt(engine->execlists.queue_priority_hint,
-				      last_prio))
+	if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint,
+					 last_prio))
		return false;
 
	/*
...
...
@@ -638,14 +638,19 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
	GEM_BUG_ON(i915_request_completed(rq));
 
	i915_sw_fence_init(&rq->submit, dummy_notify);
-	i915_sw_fence_commit(&rq->submit);
+	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 
	return rq;
 }
 
 static void dummy_request_free(struct i915_request *dummy)
 {
+	/* We have to fake the CS interrupt to kick the next request */
+	i915_sw_fence_commit(&dummy->submit);
+
	i915_request_mark_complete(dummy);
+	dma_fence_signal(&dummy->fence);
+
	i915_sched_node_fini(&dummy->sched);
	i915_sw_fence_fini(&dummy->submit);
...
...
@@ -1415,9 +1415,7 @@ long i915_request_wait(struct i915_request *rq,
	if (flags & I915_WAIT_PRIORITY) {
		if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq);
-		local_bh_disable(); /* suspend tasklets for reprioritisation */
		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
-		local_bh_enable(); /* kick tasklets en masse */
	}
 
	wait.tsk = current;
...
...
@@ -261,16 +261,27 @@ sched_lock_engine(const struct i915_sched_node *node,
	return engine;
 }
 
-static bool inflight(const struct i915_request *rq,
-		     const struct intel_engine_cs *engine)
+static inline int rq_prio(const struct i915_request *rq)
 {
-	const struct i915_request *active;
+	return rq->sched.attr.priority | __NO_PREEMPTION;
+}
+
+static void kick_submission(struct intel_engine_cs *engine, int prio)
+{
+	const struct i915_request *inflight =
+		port_request(engine->execlists.port);
 
-	if (!i915_request_is_active(rq))
-		return false;
+	/*
+	 * If we are already the currently executing context, don't
+	 * bother evaluating if we should preempt ourselves, or if
+	 * we expect nothing to change as a result of running the
+	 * tasklet, i.e. we have not changed the priority queue
+	 * sufficiently to oust the running context.
+	 */
+	if (inflight && !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
+		return;
 
-	active = port_request(engine->execlists.port);
-	return active->hw_context == rq->hw_context;
+	tasklet_hi_schedule(&engine->execlists.tasklet);
 }
 
 static void __i915_schedule(struct i915_request *rq,
...
@@ -396,15 +407,8 @@ static void __i915_schedule(struct i915_request *rq,
 
		engine->execlists.queue_priority_hint = prio;
 
-		/*
-		 * If we are already the currently executing context, don't
-		 * bother evaluating if we should preempt ourselves.
-		 */
-		if (inflight(node_to_request(node), engine))
-			continue;
-
		/* Defer (tasklet) submission until after all of our updates. */
-		tasklet_hi_schedule(&engine->execlists.tasklet);
+		kick_submission(engine, prio);
	}
 
	spin_unlock(&engine->timeline.lock);
...
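
[Editor's note] To see the net effect of kick_submission() above: previously, every priority bump scheduled the submission tasklet unless the bumped request was already executing; now the tasklet is only kicked when the bumped priority could actually oust the inflight request (the real code also kicks unconditionally when the port is empty). A toy model, not driver code, with illustrative names and values:

/*
 * Toy model (not driver code) of the new kick policy. All names and
 * values are illustrative stand-ins; the toy assumes a request is
 * always inflight in the port.
 */
#include <stdbool.h>
#include <stdio.h>

#define I915_PRIORITY_NORMAL 0

struct toy_engine {
	int inflight_prio;	/* priority of the request in the port */
	int kicks;		/* stand-in for tasklet_hi_schedule() calls */
};

static int max_int(int a, int b)
{
	return a > b ? a : b;
}

static bool need_preempt(int prio, int active)
{
	return prio > max_int(I915_PRIORITY_NORMAL - 1, active);
}

/* Shaped like kick_submission(): skip the tasklet when running it
 * could not change which request executes. */
static void toy_kick(struct toy_engine *e, int prio)
{
	if (!need_preempt(prio, e->inflight_prio))
		return;

	e->kicks++;
}

int main(void)
{
	struct toy_engine e = { .inflight_prio = I915_PRIORITY_NORMAL };

	toy_kick(&e, I915_PRIORITY_NORMAL);	/* equal priority: no kick */
	toy_kick(&e, I915_PRIORITY_NORMAL + 1);	/* higher: kick the tasklet */

	printf("kicks = %d\n", e.kicks);	/* prints: kicks = 1 */
	return 0;
}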
...
@@ -52,4 +52,22 @@ static inline void i915_priolist_free(struct i915_priolist *p)
		__i915_priolist_free(p);
 }
 
+static inline bool i915_scheduler_need_preempt(int prio, int active)
+{
+	/*
+	 * Allow preemption of low -> normal -> high, but we do
+	 * not allow low priority tasks to preempt other low priority
+	 * tasks under the impression that latency for low priority
+	 * tasks does not matter (as much as background throughput),
+	 * so kiss.
+	 *
+	 * More naturally we would write
+	 *	prio >= max(0, active);
+	 * except that we wish to prevent triggering preemption at the same
+	 * priority level: the task that is running should remain running
+	 * to preserve FIFO ordering of dependencies.
+	 */
+	return prio > max(I915_PRIORITY_NORMAL - 1, active);
+}
+
 #endif /* _I915_SCHEDULER_H_ */
...
@@ -747,7 +747,8 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
			&engine->i915->guc.preempt_work[engine->id];
		int prio = execlists->queue_priority_hint;
 
-		if (__execlists_need_preempt(prio, port_prio(port))) {
+		if (i915_scheduler_need_preempt(prio,
+						port_prio(port))) {
			execlists_set_active(execlists,
					     EXECLISTS_ACTIVE_PREEMPT);
			queue_work(engine->i915->guc.preempt_wq,
...