Commit 0606035f authored by Chris Wilson

drm/i915: "Race-to-idle" after switching to the kernel context

During suspend we want to flush out all active contexts and their
rendering. To do so we queue a request from the kernel's context; once
that request has completed, we know the GPU is completely idle. To
speed up that switch, bump the GPU clocks.
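
In code terms, the speed-up is just a conditional frequency boost on the
request we are about to wait for. The snippet below is a simplified sketch
of the wait_for_timeline() change in the diff further down; the helper name
is hypothetical, and the timeline lookup, reference counting and error
handling are elided.

/* Sketch only: boost the GPU clocks for the final wait when asked to. */
static long wait_request_boosted(struct i915_request *rq, unsigned int flags)
{
        /* "Race-to-idle": ask for maximum GPU clocks while we wait. */
        if (flags & I915_WAIT_FOR_IDLE_BOOST)
                gen6_rps_boost(rq, NULL);

        return i915_request_wait(rq, flags, MAX_SCHEDULE_TIMEOUT);
}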

Switching to the kernel context prior to idling is also used to enforce
a barrier before changing OA properties, and when evicting active
rendering from the global GTT; in all of these cases we want to
race-to-idle.
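
Only the suspend path ends up requesting the boost (see v3 below). Below is
a rough sketch of that call site, mirroring the i915_gem_suspend() hunk in
the diff; the wrapper function is illustrative and assumes the
i915_gem_switch_to_kernel_context() helper of this era, with struct_mutex
already held as in the real suspend path.

/* Sketch only: flush the GPU before sleeping, racing to idle. */
static int flush_gpu_for_suspend(struct drm_i915_private *i915)
{
        int err;

        /* Queue a request switching every engine to the kernel context. */
        err = i915_gem_switch_to_kernel_context(i915);
        if (err)
                return err;

        /* Wait for that request to retire, boosting the clocks meanwhile. */
        return i915_gem_wait_for_idle(i915,
                                      I915_WAIT_INTERRUPTIBLE |
                                      I915_WAIT_LOCKED |
                                      I915_WAIT_FOR_IDLE_BOOST);
}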

v2: Limit the boosting to only the switch before suspend.
v3: Limit it to the wait-for-idle on suspend.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: David Weinehall <david.weinehall@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Tested-by: David Weinehall <david.weinehall@linux.intel.com> #v1
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180531082246.9763-2-chris@chris-wilson.co.uk
parent 4dfacb0b
...
@@ -3708,7 +3708,29 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
 {
-	return i915_gem_active_wait(&tl->last_request, flags);
+	struct i915_request *rq;
+	long ret;
+
+	rq = i915_gem_active_get_unlocked(&tl->last_request);
+	if (!rq)
+		return 0;
+
+	/*
+	 * "Race-to-idle".
+	 *
+	 * Switching to the kernel context is often used a synchronous
+	 * step prior to idling, e.g. in suspend for flushing all
+	 * current operations to memory before sleeping. These we
+	 * want to complete as quickly as possible to avoid prolonged
+	 * stalls, so allow the gpu to boost to maximum clocks.
+	 */
+	if (flags & I915_WAIT_FOR_IDLE_BOOST)
+		gen6_rps_boost(rq, NULL);
+
+	ret = i915_request_wait(rq, flags, MAX_SCHEDULE_TIMEOUT);
+	i915_request_put(rq);
+
+	return ret < 0 ? ret : 0;
 }
 
 static int wait_for_engines(struct drm_i915_private *i915)
...
@@ -4983,7 +5005,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 	ret = i915_gem_wait_for_idle(dev_priv,
 				     I915_WAIT_INTERRUPTIBLE |
-				     I915_WAIT_LOCKED);
+				     I915_WAIT_LOCKED |
+				     I915_WAIT_FOR_IDLE_BOOST);
 	if (ret && ret != -EIO)
 		goto err_unlock;
...
@@ -267,6 +267,7 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_INTERRUPTIBLE	BIT(0)
 #define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
 #define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
...