Commit feed5c7b authored by Chris Wilson

drm/i915: Pin the context as we work on it

Since we now allow the intel_context_unpin() to run unserialised, we
risk our operations under the intel_context_lock_pinned() being run as
the context is unpinned (and thus invalidating our state). We can
atomically acquire the pin, testing to see if it is pinned in the
process, thus ensuring that the state remains consistent during the
course of the whole operation.

Fixes: 84135022 ("drm/i915/gt: Drop mutex serialisation between context pin/unpin")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200109085142.871563-1-chris@chris-wilson.co.uk
parent 921f0c47
...@@ -1236,12 +1236,14 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) ...@@ -1236,12 +1236,14 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
* image, or into the registers directory, does not stick). Pristine * image, or into the registers directory, does not stick). Pristine
* and idle contexts will be configured on pinning. * and idle contexts will be configured on pinning.
*/ */
if (!intel_context_is_pinned(ce)) if (!intel_context_pin_if_active(ce))
return 0; return 0;
rq = intel_engine_create_kernel_request(ce->engine); rq = intel_engine_create_kernel_request(ce->engine);
if (IS_ERR(rq)) if (IS_ERR(rq)) {
return PTR_ERR(rq); ret = PTR_ERR(rq);
goto out_unpin;
}
/* Serialise with the remote context */ /* Serialise with the remote context */
ret = intel_context_prepare_remote_request(ce, rq); ret = intel_context_prepare_remote_request(ce, rq);
...@@ -1249,6 +1251,8 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) ...@@ -1249,6 +1251,8 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
ret = gen8_emit_rpcs_config(rq, ce, sseu); ret = gen8_emit_rpcs_config(rq, ce, sseu);
i915_request_add(rq); i915_request_add(rq);
out_unpin:
intel_context_unpin(ce);
return ret; return ret;
} }
......
...@@ -76,9 +76,14 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce) ...@@ -76,9 +76,14 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce)
int __intel_context_do_pin(struct intel_context *ce); int __intel_context_do_pin(struct intel_context *ce);
/*
 * intel_context_pin_if_active - take a pin on the context only if it is
 * already pinned.
 *
 * atomic_inc_not_zero() makes the "is it pinned?" test and the pin
 * acquisition a single atomic operation: if ce->pin_count is non-zero it
 * is incremented and true is returned; if it is zero it is left untouched
 * and false is returned.  This closes the race where the context could be
 * unpinned between a separate intel_context_is_pinned() check and a
 * subsequent pin.  A successful (true) return must be balanced by
 * intel_context_unpin().
 */
static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
return atomic_inc_not_zero(&ce->pin_count);
}
static inline int intel_context_pin(struct intel_context *ce) static inline int intel_context_pin(struct intel_context *ce)
{ {
if (likely(atomic_inc_not_zero(&ce->pin_count))) if (likely(intel_context_pin_if_active(ce)))
return 0; return 0;
return __intel_context_do_pin(ce); return __intel_context_do_pin(ce);
......
...@@ -321,16 +321,15 @@ static void print_context_stats(struct seq_file *m, ...@@ -321,16 +321,15 @@ static void print_context_stats(struct seq_file *m,
for_each_gem_engine(ce, for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) { i915_gem_context_lock_engines(ctx), it) {
intel_context_lock_pinned(ce); if (intel_context_pin_if_active(ce)) {
if (intel_context_is_pinned(ce)) {
rcu_read_lock(); rcu_read_lock();
if (ce->state) if (ce->state)
per_file_stats(0, per_file_stats(0,
ce->state->obj, &kstats); ce->state->obj, &kstats);
per_file_stats(0, ce->ring->vma->obj, &kstats); per_file_stats(0, ce->ring->vma->obj, &kstats);
rcu_read_unlock(); rcu_read_unlock();
intel_context_unpin(ce);
} }
intel_context_unlock_pinned(ce);
} }
i915_gem_context_unlock_engines(ctx); i915_gem_context_unlock_engines(ctx);
...@@ -1513,15 +1512,14 @@ static int i915_context_status(struct seq_file *m, void *unused) ...@@ -1513,15 +1512,14 @@ static int i915_context_status(struct seq_file *m, void *unused)
for_each_gem_engine(ce, for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) { i915_gem_context_lock_engines(ctx), it) {
intel_context_lock_pinned(ce); if (intel_context_pin_if_active(ce)) {
if (intel_context_is_pinned(ce)) {
seq_printf(m, "%s: ", ce->engine->name); seq_printf(m, "%s: ", ce->engine->name);
if (ce->state) if (ce->state)
describe_obj(m, ce->state->obj); describe_obj(m, ce->state->obj);
describe_ctx_ring(m, ce->ring); describe_ctx_ring(m, ce->ring);
seq_putc(m, '\n'); seq_putc(m, '\n');
intel_context_unpin(ce);
} }
intel_context_unlock_pinned(ce);
} }
i915_gem_context_unlock_engines(ctx); i915_gem_context_unlock_engines(ctx);
......
...@@ -2159,8 +2159,6 @@ static int gen8_modify_context(struct intel_context *ce, ...@@ -2159,8 +2159,6 @@ static int gen8_modify_context(struct intel_context *ce,
struct i915_request *rq; struct i915_request *rq;
int err; int err;
lockdep_assert_held(&ce->pin_mutex);
rq = intel_engine_create_kernel_request(ce->engine); rq = intel_engine_create_kernel_request(ce->engine);
if (IS_ERR(rq)) if (IS_ERR(rq))
return PTR_ERR(rq); return PTR_ERR(rq);
...@@ -2203,17 +2201,14 @@ static int gen8_configure_context(struct i915_gem_context *ctx, ...@@ -2203,17 +2201,14 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
if (ce->engine->class != RENDER_CLASS) if (ce->engine->class != RENDER_CLASS)
continue; continue;
err = intel_context_lock_pinned(ce); /* Otherwise OA settings will be set upon first use */
if (err) if (!intel_context_pin_if_active(ce))
break; continue;
flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu); flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
err = gen8_modify_context(ce, flex, count);
/* Otherwise OA settings will be set upon first use */ intel_context_unpin(ce);
if (intel_context_is_pinned(ce))
err = gen8_modify_context(ce, flex, count);
intel_context_unlock_pinned(ce);
if (err) if (err)
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment