Commit 5763ff04 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Avoid GPU stalls from kswapd

Exclude active GPU pages from the purview of the background shrinker
(kswapd), as reclaiming them causes uncontrollable GPU stalls. Since the
shrinker is rerun until the freelists are satisfied, we have the
opportunity in subsequent passes to recover the pages once they are
idle. If the machine does run out of memory entirely, the forced idling
in the oom-notifier remains as a means of releasing all the pages we can
before the OOM killer is invoked prematurely.

Note that this relies upon an up-front retire_requests call to keep the
inactive list in shape; that call was added in a previous patch, mostly
as a band-aid for execlist context pinning.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
[danvet: Add note about retire_requests.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent ce8daef3
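
For context, the background-reclaim path that this patch gates is the
shrinker's scan callback, which never passes I915_SHRINK_ACTIVE. The
following is a simplified reconstruction of that callback from the
i915_gem_shrinker.c of this era, not part of this patch; the names match
the driver, but locking details are trimmed.

/*
 * Simplified reconstruction (not part of this patch) of the
 * kswapd-driven scan callback.  I915_SHRINK_ACTIVE is never passed
 * here, so after this patch active objects are simply skipped and
 * left for a later pass or for the oom path.
 */
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	/* Prefer purgeable objects first, then anything else unpinned... */
	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}
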
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3212,6 +3212,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
 #define I915_SHRINK_PURGEABLE 0x1
 #define I915_SHRINK_UNBOUND 0x2
 #define I915_SHRINK_BOUND 0x4
+#define I915_SHRINK_ACTIVE 0x8
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
 
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -126,6 +126,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			    obj->madv != I915_MADV_DONTNEED)
 				continue;
 
+			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
+				continue;
+
 			drm_gem_object_reference(&obj->base);
 
 			/* For the unbound phase, this should be a no-op! */
@@ -164,7 +167,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
 	return i915_gem_shrink(dev_priv, -1UL,
-			       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
+			       I915_SHRINK_BOUND |
+			       I915_SHRINK_UNBOUND |
+			       I915_SHRINK_ACTIVE);
 }
 
 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
@@ -217,7 +222,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 			count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (obj->pages_pin_count == num_vma_bound(obj))
+		if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 	}
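
By contrast, the oom notifier path deliberately keeps access to
everything: it funnels into i915_gem_shrink_all(), which after this
patch adds I915_SHRINK_ACTIVE and may therefore stall on the GPU. A
minimal sketch follows; it is heavily simplified (the real notifier in
i915_gem_shrinker.c also has to acquire struct_mutex and accounts for
unevictable pages), so treat it as an illustration, not the driver's
actual code.

/*
 * Minimal sketch of the oom notifier path (heavily simplified).
 * Unlike kswapd, it uses i915_gem_shrink_all(), which now includes
 * I915_SHRINK_ACTIVE, so it is allowed to stall on the GPU to release
 * every last page before the oom killer runs.
 */
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	unsigned long *nr_freed = ptr;

	/* i915_gem_shrink_all() == BOUND | UNBOUND | ACTIVE after this patch */
	*nr_freed += i915_gem_shrink_all(dev_priv);

	return NOTIFY_DONE;
}

The split is the design point of the patch: the cheap, repeated kswapd
passes skip active objects entirely, while the rare oom path retains the
ability to idle the GPU and reclaim them all.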