Commit fa9f6681 authored by Chris Wilson

drm/i915: Export intel_context_instance()

We want to pass an intel_context into intel_context_pin(), and that
requires us to first be able to look up the intel_context!
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190426163336.15906-2-chris@chris-wilson.co.uk
parent 251d46b0
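In practice, the change converts callers from the old single-call form,
ce = intel_context_pin(ctx, engine), into a lookup-then-pin sequence. The
sketch below illustrates that caller-side pattern as it appears in the
converted callers in the diff; example_pin() is a hypothetical wrapper used
only for illustration and is not part of the patch.

	/*
	 * Hypothetical illustration of the new calling convention:
	 * intel_context_instance() looks up (and takes a reference on) the
	 * intel_context, intel_context_pin() pins it, and the lookup
	 * reference is dropped whether or not the pin succeeded.
	 */
	static int example_pin(struct i915_gem_context *ctx,
			       struct intel_engine_cs *engine,
			       struct intel_context **out)
	{
		struct intel_context *ce;
		int err;

		ce = intel_context_instance(ctx, engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = intel_context_pin(ce);
		intel_context_put(ce);	/* drop the lookup reference */
		if (err)
			return err;

		*out = ce;	/* pinned; release with intel_context_unpin() */
		return 0;
	}
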
@@ -104,7 +104,7 @@ void __intel_context_remove(struct intel_context *ce)
 	spin_unlock(&ctx->hw_contexts_lock);
 }
 
-static struct intel_context *
+struct intel_context *
 intel_context_instance(struct i915_gem_context *ctx,
 		       struct intel_engine_cs *engine)
 {
@@ -112,7 +112,7 @@ intel_context_instance(struct i915_gem_context *ctx,
 
 	ce = intel_context_lookup(ctx, engine);
 	if (likely(ce))
-		return ce;
+		return intel_context_get(ce);
 
 	ce = intel_context_alloc();
 	if (!ce)
@@ -125,7 +125,7 @@ intel_context_instance(struct i915_gem_context *ctx,
 		intel_context_free(ce);
 
 	GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
-	return pos;
+	return intel_context_get(pos);
 }
 
 struct intel_context *
@@ -139,30 +139,30 @@ intel_context_pin_lock(struct i915_gem_context *ctx,
 	if (IS_ERR(ce))
 		return ce;
 
-	if (mutex_lock_interruptible(&ce->pin_mutex))
+	if (mutex_lock_interruptible(&ce->pin_mutex)) {
+		intel_context_put(ce);
 		return ERR_PTR(-EINTR);
+	}
 
 	return ce;
 }
 
-struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx,
-		  struct intel_engine_cs *engine)
+void intel_context_pin_unlock(struct intel_context *ce)
+	__releases(ce->pin_mutex)
 {
-	struct intel_context *ce;
-	int err;
-
-	ce = intel_context_instance(ctx, engine);
-	if (IS_ERR(ce))
-		return ce;
-
-	if (likely(atomic_inc_not_zero(&ce->pin_count)))
-		return ce;
+	mutex_unlock(&ce->pin_mutex);
+	intel_context_put(ce);
+}
+
+int __intel_context_do_pin(struct intel_context *ce)
+{
+	int err;
 
 	if (mutex_lock_interruptible(&ce->pin_mutex))
-		return ERR_PTR(-EINTR);
+		return -EINTR;
 
 	if (likely(!atomic_read(&ce->pin_count))) {
+		struct i915_gem_context *ctx = ce->gem_context;
 		intel_wakeref_t wakeref;
 
 		err = 0;
@@ -172,7 +172,6 @@ intel_context_pin(struct i915_gem_context *ctx,
 			goto err;
 
 		i915_gem_context_get(ctx);
-		GEM_BUG_ON(ce->gem_context != ctx);
 
 		mutex_lock(&ctx->mutex);
 		list_add(&ce->active_link, &ctx->active_engines);
@@ -186,11 +185,11 @@ intel_context_pin(struct i915_gem_context *ctx,
 	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
 
 	mutex_unlock(&ce->pin_mutex);
-	return ce;
+	return 0;
 
 err:
 	mutex_unlock(&ce->pin_mutex);
-	return ERR_PTR(err);
+	return err;
 }
 
 void intel_context_unpin(struct intel_context *ce)
...
@@ -49,11 +49,7 @@ intel_context_is_pinned(struct intel_context *ce)
 	return atomic_read(&ce->pin_count);
 }
 
-static inline void intel_context_pin_unlock(struct intel_context *ce)
-__releases(ce->pin_mutex)
-{
-	mutex_unlock(&ce->pin_mutex);
-}
+void intel_context_pin_unlock(struct intel_context *ce);
 
 struct intel_context *
 __intel_context_insert(struct i915_gem_context *ctx,
@@ -63,7 +59,18 @@ void
 __intel_context_remove(struct intel_context *ce);
 
 struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+intel_context_instance(struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine);
+
+int __intel_context_do_pin(struct intel_context *ce);
+
+static inline int intel_context_pin(struct intel_context *ce)
+{
+	if (likely(atomic_inc_not_zero(&ce->pin_count)))
+		return 0;
+
+	return __intel_context_do_pin(ce);
+}
 
 static inline void __intel_context_pin(struct intel_context *ce)
 {
...
@@ -713,11 +713,17 @@ static int pin_context(struct i915_gem_context *ctx,
 		       struct intel_context **out)
 {
 	struct intel_context *ce;
+	int err;
 
-	ce = intel_context_pin(ctx, engine);
+	ce = intel_context_instance(ctx, engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
+	err = intel_context_pin(ce);
+	intel_context_put(ce);
+	if (err)
+		return err;
+
 	*out = ce;
 	return 0;
 }
...
@@ -239,6 +239,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 				    int id)
 {
 	struct mock_engine *engine;
+	int err;
 
 	GEM_BUG_ON(id >= I915_NUM_ENGINES);
 
@@ -278,10 +279,15 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	INIT_LIST_HEAD(&engine->hw_queue);
 
 	engine->base.kernel_context =
-		intel_context_pin(i915->kernel_context, &engine->base);
+		intel_context_instance(i915->kernel_context, &engine->base);
 	if (IS_ERR(engine->base.kernel_context))
 		goto err_breadcrumbs;
 
+	err = intel_context_pin(engine->base.kernel_context);
+	intel_context_put(engine->base.kernel_context);
+	if (err)
+		goto err_breadcrumbs;
+
 	return &engine->base;
 
 err_breadcrumbs:
...
@@ -1183,12 +1183,17 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		INIT_LIST_HEAD(&s->workload_q_head[i]);
 		s->shadow[i] = ERR_PTR(-EINVAL);
 
-		ce = intel_context_pin(ctx, engine);
+		ce = intel_context_instance(ctx, engine);
 		if (IS_ERR(ce)) {
 			ret = PTR_ERR(ce);
 			goto out_shadow_ctx;
 		}
 
+		ret = intel_context_pin(ce);
+		intel_context_put(ce);
+		if (ret)
+			goto out_shadow_ctx;
+
 		s->shadow[i] = ce;
 	}
...
@@ -2100,14 +2100,19 @@ static int eb_pin_context(struct i915_execbuffer *eb,
 	if (err)
 		return err;
 
+	ce = intel_context_instance(eb->gem_context, engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
 	/*
 	 * Pinning the contexts may generate requests in order to acquire
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	ce = intel_context_pin(eb->gem_context, engine);
-	if (IS_ERR(ce))
-		return PTR_ERR(ce);
+	err = intel_context_pin(ce);
+	intel_context_put(ce);
+	if (err)
+		return err;
 
 	eb->engine = engine;
 	eb->context = ce;
...
@@ -1205,11 +1205,17 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
 {
 	struct intel_engine_cs *engine = i915->engine[RCS0];
 	struct intel_context *ce;
-	int ret;
+	int err;
 
-	ret = i915_mutex_lock_interruptible(&i915->drm);
-	if (ret)
-		return ERR_PTR(ret);
+	ce = intel_context_instance(ctx, engine);
+	if (IS_ERR(ce))
+		return ce;
+
+	err = i915_mutex_lock_interruptible(&i915->drm);
+	if (err) {
+		intel_context_put(ce);
+		return ERR_PTR(err);
+	}
 
 	/*
 	 * As the ID is the gtt offset of the context's vma we
@@ -1217,10 +1223,11 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
 	 *
 	 * NB: implied RCS engine...
 	 */
-	ce = intel_context_pin(ctx, engine);
+	err = intel_context_pin(ce);
 	mutex_unlock(&i915->drm.struct_mutex);
-	if (IS_ERR(ce))
-		return ce;
+	intel_context_put(ce);
+	if (err)
+		return ERR_PTR(err);
 
 	i915->perf.oa.pinned_ctx = ce;
...
@@ -785,6 +785,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	struct drm_i915_private *i915 = engine->i915;
 	struct intel_context *ce;
 	struct i915_request *rq;
+	int err;
 
 	/*
 	 * Preempt contexts are reserved for exclusive use to inject a
@@ -798,13 +799,21 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	ce = intel_context_pin(ctx, engine);
+	ce = intel_context_instance(ctx, engine);
 	if (IS_ERR(ce))
 		return ERR_CAST(ce);
 
+	err = intel_context_pin(ce);
+	if (err) {
+		rq = ERR_PTR(err);
+		goto err_put;
+	}
+
 	rq = i915_request_create(ce);
 	intel_context_unpin(ce);
 
+err_put:
+	intel_context_put(ce);
 	return rq;
 }
...