Commit bf06112f authored by Chris Wilson

drm/i915: Tidy i915_gem_suspend()

In the next patch, we will make a fairly minor change to flush
outstanding resets before suspend. In order to keep churn to a minimum
in that functional patch, we fix up the comments and coding style now.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180709130208.11730-7-chris@chris-wilson.co.uk
parent b7bb6138
@@ -5029,32 +5029,32 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	mutex_unlock(&i915->drm.struct_mutex);
 }
 
-int i915_gem_suspend(struct drm_i915_private *dev_priv)
+int i915_gem_suspend(struct drm_i915_private *i915)
 {
-	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	GEM_TRACE("\n");
 
-	intel_runtime_pm_get(dev_priv);
-	intel_suspend_gt_powersave(dev_priv);
+	intel_runtime_pm_get(i915);
+	intel_suspend_gt_powersave(i915);
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
 
-	/* We have to flush all the executing contexts to main memory so
+	/*
+	 * We have to flush all the executing contexts to main memory so
 	 * that they can saved in the hibernation image. To ensure the last
 	 * context image is coherent, we have to switch away from it. That
-	 * leaves the dev_priv->kernel_context still active when
+	 * leaves the i915->kernel_context still active when
 	 * we actually suspend, and its image in memory may not match the GPU
 	 * state. Fortunately, the kernel_context is disposable and we do
 	 * not rely on its state.
 	 */
-	if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
-		ret = i915_gem_switch_to_kernel_context(dev_priv);
+	if (!i915_terminally_wedged(&i915->gpu_error)) {
+		ret = i915_gem_switch_to_kernel_context(i915);
 		if (ret)
 			goto err_unlock;
 
-		ret = i915_gem_wait_for_idle(dev_priv,
+		ret = i915_gem_wait_for_idle(i915,
 					     I915_WAIT_INTERRUPTIBLE |
 					     I915_WAIT_LOCKED |
 					     I915_WAIT_FOR_IDLE_BOOST,
@@ -5062,33 +5062,35 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 		if (ret && ret != -EIO)
 			goto err_unlock;
 
-		assert_kernel_context_is_current(dev_priv);
+		assert_kernel_context_is_current(i915);
 	}
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
 
-	intel_uc_suspend(dev_priv);
+	intel_uc_suspend(i915);
 
-	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+	cancel_delayed_work_sync(&i915->gt.retire_work);
 
-	/* As the idle_work is rearming if it detects a race, play safe and
+	/*
+	 * As the idle_work is rearming if it detects a race, play safe and
 	 * repeat the flush until it is definitely idle.
 	 */
-	drain_delayed_work(&dev_priv->gt.idle_work);
+	drain_delayed_work(&i915->gt.idle_work);
 
-	/* Assert that we sucessfully flushed all the work and
+	/*
+	 * Assert that we successfully flushed all the work and
 	 * reset the GPU back to its idle, low power state.
 	 */
-	WARN_ON(dev_priv->gt.awake);
-	if (WARN_ON(!intel_engines_are_idle(dev_priv)))
-		i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
+	WARN_ON(i915->gt.awake);
+	if (WARN_ON(!intel_engines_are_idle(i915)))
+		i915_gem_set_wedged(i915); /* no hope, discard everything */
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put(i915);
 	return 0;
 
 err_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&i915->drm.struct_mutex);
+	intel_runtime_pm_put(i915);
 	return ret;
 }
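Note on the drain step: the suspend path flushes gt.idle_work to completion rather than cancelling it, and because the handler can re-arm itself when it detects a race, a single flush is not enough. A minimal sketch of the drain pattern relied on here, assuming the helper is the usual flush-until-idle loop (its exact definition lives elsewhere in the driver and may differ):

#include <linux/workqueue.h>

/*
 * Flush a delayed work item that may re-queue itself from its own
 * callback. flush_delayed_work() returns true while there was work
 * to wait for, so keep flushing until a pass finds nothing running,
 * then recheck that the handler did not schedule itself again.
 */
static inline void drain_delayed_work(struct delayed_work *dw)
{
	do {
		while (flush_delayed_work(dw))
			;
	} while (delayed_work_pending(dw));
}

This is why the hangcheck and retire workers above use cancel_delayed_work_sync() (their pending runs can simply be dropped), while idle_work must actually execute and settle before the device is suspended.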