Commit aa7b93eb authored by Chris Wilson, committed by Andrzej Hajda

drm/i915/gt: Hold a wakeref for the active VM

There may be a disconnect between the GT used by the engine and the GT
used for the VM, requiring us to hold a wakeref on both while the GPU is
active with this request.

v2: added explanation to __queue_and_release_pm
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[ahajda: removed not-yet-upstreamed wakeref tracking bits]
Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230330-hold_wakeref_for_active_vm-v2-1-724d201499c2@intel.com
parent a915450e
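
For illustration, a minimal userspace model of the rule the patch enforces (not i915 code; every name below is invented): the first activation of a context must take a wakeref on both the engine's GT and the VM's GT, and the last deactivation must drop both, because the two may be different GTs. In the patch itself the engine-GT reference is held via the existing ce->ops->enter() path, intel_gt_pm_get(ce->vm->gt) is the new explicit half, and the exit side uses intel_gt_pm_put_async(), presumably so the final power-down can be deferred rather than performed synchronously at the last put.

/*
 * Toy model of "hold a wakeref for the active VM": a context runs on one
 * GT but its VM (and page tables) may live on another, so both must stay
 * awake for as long as the context has active requests.
 */
#include <assert.h>
#include <stdio.h>

struct gt {
	const char *name;
	int wakeref;			/* stands in for intel_gt_pm_get/put */
};

struct context {
	struct gt *engine_gt;		/* GT that executes the requests */
	struct gt *vm_gt;		/* GT whose memory backs the VM */
	int active_count;
};

static void gt_pm_get(struct gt *gt) { gt->wakeref++; }

static void gt_pm_put(struct gt *gt)
{
	assert(gt->wakeref > 0);
	gt->wakeref--;
}

static void context_enter(struct context *ce)
{
	if (ce->active_count++)
		return;

	gt_pm_get(ce->engine_gt);	/* taken via the engine-PM path in i915 */
	gt_pm_get(ce->vm_gt);		/* the new explicit VM-GT wakeref */
}

static void context_exit(struct context *ce)
{
	assert(ce->active_count > 0);
	if (--ce->active_count)
		return;

	gt_pm_put(ce->vm_gt);
	gt_pm_put(ce->engine_gt);
}

int main(void)
{
	struct gt render = { "render", 0 }, media = { "media", 0 };
	struct context ce = { .engine_gt = &media, .vm_gt = &render };

	context_enter(&ce);		/* first activation: both GTs held awake */
	context_enter(&ce);		/* nested activation: count only */
	printf("render=%d media=%d\n", render.wakeref, media.wakeref);	/* 1 1 */

	context_exit(&ce);
	context_exit(&ce);		/* last exit: both wakerefs dropped */
	printf("render=%d media=%d\n", render.wakeref, media.wakeref);	/* 0 0 */
	return 0;
}
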
@@ -14,6 +14,7 @@
 #include "i915_drv.h"
 #include "intel_context_types.h"
 #include "intel_engine_types.h"
+#include "intel_gt_pm.h"
 #include "intel_ring_types.h"
 #include "intel_timeline_types.h"
 #include "i915_trace.h"
@@ -207,8 +208,11 @@ void intel_context_exit_engine(struct intel_context *ce);
 static inline void intel_context_enter(struct intel_context *ce)
 {
 	lockdep_assert_held(&ce->timeline->mutex);
-	if (!ce->active_count++)
-		ce->ops->enter(ce);
+	if (ce->active_count++)
+		return;
+
+	ce->ops->enter(ce);
+	intel_gt_pm_get(ce->vm->gt);
 }
 
 static inline void intel_context_mark_active(struct intel_context *ce)
@@ -222,7 +226,10 @@ static inline void intel_context_exit(struct intel_context *ce)
 {
 	lockdep_assert_held(&ce->timeline->mutex);
 	GEM_BUG_ON(!ce->active_count);
-	if (!--ce->active_count)
-		ce->ops->exit(ce);
+	if (--ce->active_count)
+		return;
+
+	intel_gt_pm_put_async(ce->vm->gt);
+	ce->ops->exit(ce);
 }
......
@@ -114,6 +114,15 @@ __queue_and_release_pm(struct i915_request *rq,
 	ENGINE_TRACE(engine, "parking\n");
 
+	/*
+	 * Open coded one half of intel_context_enter, which we have to omit
+	 * here (see the large comment below) and because the other part must
+	 * not be called due constructing directly with __i915_request_create
+	 * which increments active count via intel_context_mark_active.
+	 */
+	GEM_BUG_ON(rq->context->active_count != 1);
+	__intel_gt_pm_get(engine->gt);
+
 	/*
 	 * We have to serialise all potential retirement paths with our
 	 * submission, as we don't want to underflow either the
......
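
My reading of why the hunk above is needed (an inference, not text from the patch): the parking request is created directly with __i915_request_create(), which raises the context's active_count through intel_context_mark_active() without ever running intel_context_enter(); intel_context_exit() will nevertheless run when that request is retired and drop a wakeref on ce->vm->gt. The open-coded __intel_gt_pm_get(engine->gt) supplies the reference that the skipped enter path would have taken, and since the kernel context used for parking presumably has its VM on the same GT as the engine, the get and the later put balance. A self-contained toy model of that balance (invented names, userspace C):

#include <assert.h>

struct gt { int wakeref; };
struct context { struct gt *vm_gt; int active_count; };

/* Models __i915_request_create(): marks the context active without ever
 * running the intel_context_enter() path. */
static void request_create(struct context *ce)
{
	ce->active_count++;
}

/* Models intel_context_exit() running when the request is retired. */
static void context_exit(struct context *ce)
{
	assert(ce->active_count > 0);
	if (--ce->active_count)
		return;

	assert(ce->vm_gt->wakeref > 0);	/* would underflow without the fix */
	ce->vm_gt->wakeref--;
}

int main(void)
{
	struct gt gt = { 0 };
	struct context kctx = { .vm_gt = &gt };	/* kernel context: VM assumed on the engine's GT */

	request_create(&kctx);
	assert(kctx.active_count == 1);	/* mirrors the GEM_BUG_ON in the hunk */
	gt.wakeref++;			/* the open-coded __intel_gt_pm_get(engine->gt) */

	context_exit(&kctx);		/* retirement drops the VM-GT wakeref */
	assert(gt.wakeref == 0);	/* balanced: neither leaked nor underflowed */
	return 0;
}
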