Commit 0c91621c authored by Chris Wilson

drm/i915/gt: Pass intel_gt to pm routines

Switch from passing the i915 container to the newly named struct intel_gt.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190625130128.11009-2-chris@chris-wilson.co.uk
parent 8e9c2f62
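
In short: the GT power-management entry points now take the struct intel_gt embedded in drm_i915_private instead of the device pointer itself, and callers reach it via i915->gt (or engine->gt when an engine is already at hand). Below is a minimal caller sketch, not part of the patch, using only names that appear in the hunks that follow and assuming the usual i915 headers; the function name is hypothetical and purely illustrative.

#include "i915_drv.h"         /* struct drm_i915_private, with its embedded gt */
#include "gt/intel_gt_pm.h"   /* intel_gt_pm_get() / intel_gt_pm_put() */

/* Hypothetical caller showing the new calling convention. */
static void example_hold_gt_awake(struct drm_i915_private *i915)
{
	/* was: intel_gt_pm_get(i915); */
	intel_gt_pm_get(&i915->gt);

	/* ... touch GT hardware while the wakeref is held ... */

	/* was: intel_gt_pm_put(i915); */
	intel_gt_pm_put(&i915->gt);
}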
@@ -2437,7 +2437,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	 * wakeref that we hold until the GPU has been idle for at least
 	 * 100ms.
 	 */
-	intel_gt_pm_get(eb.i915);
+	intel_gt_pm_get(&eb.i915->gt);
 
 	err = i915_mutex_lock_interruptible(dev);
 	if (err)
@@ -2607,7 +2607,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
 err_rpm:
-	intel_gt_pm_put(eb.i915);
+	intel_gt_pm_put(&eb.i915->gt);
 	i915_gem_context_put(eb.gem_context);
 err_destroy:
 	eb_destroy(&eb);
...
@@ -258,7 +258,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	 * guarantee that the context image is complete. So let's just reset
 	 * it and start again.
 	 */
-	intel_gt_resume(i915);
+	intel_gt_resume(&i915->gt);
 
 	if (i915_gem_init_hw(i915))
 		goto err_wedged;
...
@@ -379,7 +379,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 {
 	i915_gem_shrinker_unregister(i915);
 
-	intel_gt_pm_get(i915);
+	intel_gt_pm_get(&i915->gt);
 
 	cancel_delayed_work_sync(&i915->gem.retire_work);
 	flush_work(&i915->gem.idle_work);
@@ -387,7 +387,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 
 static void restore_retire_worker(struct drm_i915_private *i915)
 {
-	intel_gt_pm_put(i915);
+	intel_gt_pm_put(&i915->gt);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	igt_flush_test(i915, I915_WAIT_LOCKED);
...
@@ -18,7 +18,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
 
 	GEM_TRACE("%s\n", engine->name);
 
-	intel_gt_pm_get(engine->i915);
+	intel_gt_pm_get(engine->gt);
 
 	/* Pin the default state for fast resets from atomic context. */
 	map = NULL;
@@ -129,7 +129,7 @@ static int __engine_park(struct intel_wakeref *wf)
 	engine->execlists.no_priolist = false;
 
-	intel_gt_pm_put(engine->i915);
+	intel_gt_pm_put(engine->gt);
 
 	return 0;
 }
@@ -149,7 +149,7 @@ int intel_engines_resume(struct drm_i915_private *i915)
 	enum intel_engine_id id;
 	int err = 0;
 
-	intel_gt_pm_get(i915);
+	intel_gt_pm_get(&i915->gt);
 	for_each_engine(engine, i915, id) {
 		intel_engine_pm_get(engine);
 		engine->serial++; /* kernel context lost */
@@ -162,7 +162,7 @@ int intel_engines_resume(struct drm_i915_private *i915)
 			break;
 		}
 	}
-	intel_gt_pm_put(i915);
+	intel_gt_pm_put(&i915->gt);
 
 	return err;
 }
@@ -50,9 +50,11 @@ static int intel_gt_unpark(struct intel_wakeref *wf)
 	return 0;
 }
 
-void intel_gt_pm_get(struct drm_i915_private *i915)
+void intel_gt_pm_get(struct intel_gt *gt)
 {
-	intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark);
+	struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
+
+	intel_wakeref_get(rpm, &gt->wakeref, intel_gt_unpark);
 }
 
 static int intel_gt_park(struct intel_wakeref *wf)
@@ -75,9 +77,11 @@ static int intel_gt_park(struct intel_wakeref *wf)
 	return 0;
 }
 
-void intel_gt_pm_put(struct drm_i915_private *i915)
+void intel_gt_pm_put(struct intel_gt *gt)
 {
-	intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park);
+	struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
+
+	intel_wakeref_put(rpm, &gt->wakeref, intel_gt_park);
 }
 
 void intel_gt_pm_init_early(struct intel_gt *gt)
@@ -96,7 +100,7 @@ static bool reset_engines(struct drm_i915_private *i915)
 /**
  * intel_gt_sanitize: called after the GPU has lost power
- * @i915: the i915 device
+ * @gt: the i915 GT container
  * @force: ignore a failed reset and sanitize engine state anyway
  *
  * Anytime we reset the GPU, either with an explicit GPU reset or through a
@@ -104,21 +108,21 @@ static bool reset_engines(struct drm_i915_private *i915)
  * to match. Note that calling intel_gt_sanitize() if the GPU has not
  * been reset results in much confusion!
  */
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
+void intel_gt_sanitize(struct intel_gt *gt, bool force)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
 	GEM_TRACE("\n");
 
-	if (!reset_engines(i915) && !force)
+	if (!reset_engines(gt->i915) && !force)
 		return;
 
-	for_each_engine(engine, i915, id)
+	for_each_engine(engine, gt->i915, id)
 		intel_engine_reset(engine, false);
 }
 
-void intel_gt_resume(struct drm_i915_private *i915)
+void intel_gt_resume(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
@@ -129,7 +133,7 @@ void intel_gt_resume(struct drm_i915_private *i915)
 	 * Only the kernel contexts should remain pinned over suspend,
 	 * allowing us to fixup the user contexts on their first pin.
 	 */
-	for_each_engine(engine, i915, id) {
+	for_each_engine(engine, gt->i915, id) {
 		struct intel_context *ce;
 
 		ce = engine->kernel_context;
...
@@ -9,7 +9,6 @@
 
 #include <linux/types.h>
 
-struct drm_i915_private;
 struct intel_gt;
 
 enum {
@@ -17,12 +16,12 @@ enum {
 	INTEL_GT_PARK,
 };
 
-void intel_gt_pm_get(struct drm_i915_private *i915);
-void intel_gt_pm_put(struct drm_i915_private *i915);
+void intel_gt_pm_get(struct intel_gt *gt);
+void intel_gt_pm_put(struct intel_gt *gt);
 
 void intel_gt_pm_init_early(struct intel_gt *gt);
 
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
+void intel_gt_sanitize(struct intel_gt *gt, bool force);
+void intel_gt_resume(struct intel_gt *gt);
 
 #endif /* INTEL_GT_PM_H */
@@ -714,7 +714,7 @@ static void reset_prepare(struct drm_i915_private *i915)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
-	intel_gt_pm_get(i915);
+	intel_gt_pm_get(&i915->gt);
 	for_each_engine(engine, i915, id)
 		reset_prepare_engine(engine);
@@ -765,7 +765,7 @@ static void reset_finish(struct drm_i915_private *i915)
 		reset_finish_engine(engine);
 		intel_engine_signal_breadcrumbs(engine);
 	}
-	intel_gt_pm_put(i915);
+	intel_gt_pm_put(&i915->gt);
 }
 
 static void nop_submit_request(struct i915_request *request)
@@ -891,7 +891,7 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
 	}
 	mutex_unlock(&i915->gt.timelines.mutex);
 
-	intel_gt_sanitize(i915, false);
+	intel_gt_sanitize(&i915->gt, false);
 
 	/*
 	 * Undo nop_submit_request. We prevent all new i915 requests from
...
@@ -2377,7 +2377,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	intel_power_domains_resume(dev_priv);
 
-	intel_gt_sanitize(dev_priv, true);
+	intel_gt_sanitize(&dev_priv->gt, true);
 
 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
...
@@ -1157,7 +1157,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	 * it may impact the display and we are uncertain about the stability
 	 * of the reset, so this could be applied to even earlier gen.
 	 */
-	intel_gt_sanitize(i915, false);
+	intel_gt_sanitize(&i915->gt, false);
 
 	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
...
@@ -115,7 +115,7 @@ static void pm_resume(struct drm_i915_private *i915)
 	 * that runtime-pm just works.
 	 */
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		intel_gt_sanitize(i915, false);
+		intel_gt_sanitize(&i915->gt, false);
 		i915_gem_sanitize(i915);
 		i915_gem_resume(i915);
 	}
...