Commit b11b28ea authored by Chris Wilson

drm/i915/gt: Pull context activation into central intel_context_pin()

While this is encroaching on midlayer territory, having already made the
state allocation a previous step in pinning, we can now pull the common
intel_context_active_acquire() into intel_context_pin() itself. This is
a prelude to making the activation a separate step inside pinning,
outside of the ce->pin_mutex.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200109085717.873326-2-chris@chris-wilson.co.uk
parent 89f98d63
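
For orientation before the hunks below: this is a condensed sketch of the shape
__intel_context_do_pin() takes after the patch, not the verbatim source. The
activation acquire/release calls, the runtime-pm block, and the err/err_active
unwind come straight from the diff; the mutex_lock_interruptible()/atomic_inc()
framing follows the surrounding function and the tracing/publish steps are
abbreviated to a comment.

	/* Condensed sketch of the post-patch flow; details elided. */
	int __intel_context_do_pin(struct intel_context *ce)
	{
		int err;

		if (mutex_lock_interruptible(&ce->pin_mutex))
			return -EINTR;

		if (likely(!atomic_read(&ce->pin_count))) {
			intel_wakeref_t wakeref;

			/* Activation is now acquired centrally, for every backend. */
			err = intel_context_active_acquire(ce);
			if (unlikely(err))
				goto err;

			with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref)
				err = ce->ops->pin(ce); /* backend-specific work only */
			if (unlikely(err))
				goto err_active; /* unwind the central activation */

			/* ... trace and publish the pin ... */
		}

		atomic_inc(&ce->pin_count);
		mutex_unlock(&ce->pin_mutex);
		return 0;

	err_active:
		intel_context_active_release(ce);
	err:
		mutex_unlock(&ce->pin_mutex);
		return err;
	}
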
@@ -63,6 +63,34 @@ int intel_context_alloc_state(struct intel_context *ce)
 	return err;
 }
 
+static int intel_context_active_acquire(struct intel_context *ce)
+{
+	int err;
+
+	err = i915_active_acquire(&ce->active);
+	if (err)
+		return err;
+
+	/* Preallocate tracking nodes */
+	if (!intel_context_is_barrier(ce)) {
+		err = i915_active_acquire_preallocate_barrier(&ce->active,
+							      ce->engine);
+		if (err) {
+			i915_active_release(&ce->active);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void intel_context_active_release(struct intel_context *ce)
+{
+	/* Nodes preallocated in intel_context_active() */
+	i915_active_acquire_barrier(&ce->active);
+	i915_active_release(&ce->active);
+}
+
 int __intel_context_do_pin(struct intel_context *ce)
 {
 	int err;
@@ -79,11 +107,14 @@ int __intel_context_do_pin(struct intel_context *ce)
 	if (likely(!atomic_read(&ce->pin_count))) {
 		intel_wakeref_t wakeref;
 
-		err = 0;
+		err = intel_context_active_acquire(ce);
+		if (unlikely(err))
+			goto err;
+
 		with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref)
 			err = ce->ops->pin(ce);
-		if (err)
-			goto err;
+		if (unlikely(err))
+			goto err_active;
 
 		CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
 			 ce->ring->head, ce->ring->tail);
@@ -97,6 +128,8 @@ int __intel_context_do_pin(struct intel_context *ce)
 	mutex_unlock(&ce->pin_mutex);
 	return 0;
 
+err_active:
+	intel_context_active_release(ce);
 err:
 	mutex_unlock(&ce->pin_mutex);
 	return err;
@@ -198,34 +231,6 @@ static int __intel_context_active(struct i915_active *active)
 	return err;
 }
 
-int intel_context_active_acquire(struct intel_context *ce)
-{
-	int err;
-
-	err = i915_active_acquire(&ce->active);
-	if (err)
-		return err;
-
-	/* Preallocate tracking nodes */
-	if (!intel_context_is_barrier(ce)) {
-		err = i915_active_acquire_preallocate_barrier(&ce->active,
-							      ce->engine);
-		if (err) {
-			i915_active_release(&ce->active);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-void intel_context_active_release(struct intel_context *ce)
-{
-	/* Nodes preallocated in intel_context_active() */
-	i915_active_acquire_barrier(&ce->active);
-	i915_active_release(&ce->active);
-}
-
 void
 intel_context_init(struct intel_context *ce,
 		   struct intel_engine_cs *engine)

@@ -123,9 +123,6 @@ static inline void intel_context_exit(struct intel_context *ce)
 	ce->ops->exit(ce);
 }
 
-int intel_context_active_acquire(struct intel_context *ce);
-void intel_context_active_release(struct intel_context *ce);
-
 static inline struct intel_context *intel_context_get(struct intel_context *ce)
 {
 	kref_get(&ce->ref);

@@ -2562,33 +2562,21 @@ __execlists_context_pin(struct intel_context *ce,
 		       struct intel_engine_cs *engine)
 {
 	void *vaddr;
-	int ret;
 
 	GEM_BUG_ON(!ce->state);
-
-	ret = intel_context_active_acquire(ce);
-	if (ret)
-		goto err;
 	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
 
 	vaddr = i915_gem_object_pin_map(ce->state->obj,
 					i915_coherent_map_type(engine->i915) |
 					I915_MAP_OVERRIDE);
-	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		goto unpin_active;
-	}
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
 
 	ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 	__execlists_update_reg_state(ce, engine);
 
 	return 0;
-
-unpin_active:
-	intel_context_active_release(ce);
-err:
-	return ret;
 }
 
 static int execlists_context_pin(struct intel_context *ce)

@@ -1329,21 +1329,7 @@ static int ring_context_alloc(struct intel_context *ce)
 
 static int ring_context_pin(struct intel_context *ce)
 {
-	int err;
-
-	err = intel_context_active_acquire(ce);
-	if (err)
-		return err;
-
-	err = __context_pin_ppgtt(ce);
-	if (err)
-		goto err_active;
-
-	return 0;
-
-err_active:
-	intel_context_active_release(ce);
-	return err;
+	return __context_pin_ppgtt(ce);
 }
 
 static void ring_context_reset(struct intel_context *ce)

@@ -149,7 +149,7 @@ static int mock_context_alloc(struct intel_context *ce)
 
 static int mock_context_pin(struct intel_context *ce)
 {
-	return intel_context_active_acquire(ce);
+	return 0;
 }
 
 static void mock_context_reset(struct intel_context *ce)
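
The net effect on backends is visible in the last two hunks: a ->pin() hook no
longer opens with intel_context_active_acquire() and no longer needs an unwind
path for it. As a hypothetical illustration (foo_context_pin() and
__foo_pin_state() are invented names, not part of the patch), a minimal
backend now reduces to:

	/* Hypothetical backend pin hook; activation is handled by the caller. */
	static int foo_context_pin(struct intel_context *ce)
	{
		/*
		 * No intel_context_active_acquire()/intel_context_active_release()
		 * pairing here: __intel_context_do_pin() acquires the activation
		 * before calling this hook and releases it again if the hook fails.
		 */
		return __foo_pin_state(ce); /* invented helper for engine-specific setup */
	}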