Commit f75f9157 authored by Chris Wilson

drm/i915: Shrink search list for active timelines

When switching to the kernel context, we force the switch to occur after
all currently active requests (so that we know the GPU won't immediately
switch away and the kernel context remains current while we work). To do
so we have to inspect all the timelines and, for any active work found,
add a fence that queues our switch afterwards. We can use the tracked set
of active rings to shrink that search to just the active timelines.

v2: Use a local to shrink the list_for_each_entry()
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180515143149.4795-1-chris@chris-wilson.co.uk
parent 01f83786
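
The diff below replaces a walk over every timeline ever created with a walk over only the rings that currently have requests outstanding. As a standalone illustration of that bookkeeping pattern, here is a minimal userspace C sketch; it is not i915 code, and all names (timeline_submit, active_timelines, and so on) are hypothetical, with the intrusive list a cut-down stand-in for the kernel's struct list_head:

/*
 * Sketch: keep busy timelines on a second, shorter list so that
 * "is anything still active?" never has to visit idle timelines.
 */
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

static bool list_empty(const struct list_head *h) { return h->next == h; }

struct timeline {
	struct list_head link;        /* always on the global list */
	struct list_head active_link; /* on the active list only while busy */
	int last_request;             /* 0 == idle */
};

static struct list_head all_timelines =
	{ &all_timelines, &all_timelines };
static struct list_head active_timelines =
	{ &active_timelines, &active_timelines };

/* Submitting work puts the timeline on the short active list. */
static void timeline_submit(struct timeline *tl, int request)
{
	if (!tl->last_request)
		list_add_tail(&tl->active_link, &active_timelines);
	tl->last_request = request;
}

/* Retiring the work drops it off the active list again. */
static void timeline_retire(struct timeline *tl)
{
	tl->last_request = 0;
	list_del_init(&tl->active_link);
}

/*
 * The check the patch shrinks: an idle timeline has no last request
 * to fence against, so only the active list needs to be walked.
 */
static bool anything_active(void)
{
	return !list_empty(&active_timelines);
}

int main(void)
{
	struct timeline a = { .last_request = 0 };

	list_init(&a.link);
	list_init(&a.active_link);
	list_add_tail(&a.link, &all_timelines);

	timeline_submit(&a, 1);
	printf("busy after submit: %d\n", anything_active()); /* prints 1 */

	timeline_retire(&a);
	printf("busy after retire: %d\n", anything_active()); /* prints 0 */
	return 0;
}

The point of the second list is that the check becomes proportional to the number of active timelines rather than the total: a timeline with no outstanding request can never contribute a fence, so it never needs to be visited.
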
@@ -596,41 +596,44 @@ last_request_on_engine(struct i915_timeline *timeline,
 
 static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
 {
-	struct i915_timeline *timeline;
+	struct list_head * const active_rings = &engine->i915->gt.active_rings;
+	struct intel_ring *ring;
+
+	lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
-	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
-		if (last_request_on_engine(timeline, engine))
+	list_for_each_entry(ring, active_rings, active_link) {
+		if (last_request_on_engine(ring->timeline, engine))
 			return false;
 	}
 
 	return intel_engine_has_kernel_context(engine);
 }
 
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
-	struct i915_timeline *timeline;
 	enum intel_engine_id id;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
 
-	i915_retire_requests(dev_priv);
+	i915_retire_requests(i915);
 
-	for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
+		struct intel_ring *ring;
 		struct i915_request *rq;
 
 		if (engine_has_idle_kernel_context(engine))
 			continue;
 
-		rq = i915_request_alloc(engine, dev_priv->kernel_context);
+		rq = i915_request_alloc(engine, i915->kernel_context);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);
 
 		/* Queue this switch after all other activity */
-		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
 			struct i915_request *prev;
 
-			prev = last_request_on_engine(timeline, engine);
+			prev = last_request_on_engine(ring->timeline, engine);
 			if (prev)
 				i915_sw_fence_await_sw_fence_gfp(&rq->submit,
 								 &prev->submit,
...
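
The fencing step itself ("Queue this switch after all other activity") makes the new kernel-context request await the last request found on each active timeline, via i915_sw_fence_await_sw_fence_gfp() as seen in the hunk above. Below is a toy dependency model of that ordering, again with entirely hypothetical names and no real i915 API:

/*
 * Toy model (not i915 code): the switch request collects the last
 * request of each active timeline as a dependency, so it cannot run
 * until all of them have completed.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPS 8

struct request {
	const char *name;
	bool completed;
	struct request *deps[MAX_DEPS];
	int ndeps;
};

/* Order @rq after @prev: @rq may not run until @prev completes. */
static void request_await(struct request *rq, struct request *prev)
{
	if (prev && rq->ndeps < MAX_DEPS)
		rq->deps[rq->ndeps++] = prev;
}

static bool request_ready(const struct request *rq)
{
	for (int i = 0; i < rq->ndeps; i++)
		if (!rq->deps[i]->completed)
			return false;
	return true;
}

int main(void)
{
	struct request busy = { .name = "active work" };
	struct request sw = { .name = "switch to kernel context" };

	/* The switch is fenced behind the last active request. */
	request_await(&sw, &busy);
	printf("switch ready? %d\n", request_ready(&sw)); /* prints 0 */

	busy.completed = true;
	printf("switch ready? %d\n", request_ready(&sw)); /* prints 1 */
	return 0;
}

Once every collected dependency has completed, the switch request becomes runnable, which is the guarantee the commit message relies on: the switch to the kernel context happens only after all currently active requests.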