Commit ceabbba5 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Include bound and active pages in the count of shrinkable objects

When the machine is under a lot of memory pressure and being stressed by
multiple GPU threads, we quite often report fewer than shrinker->batch
(i.e. SHRINK_BATCH) pages to be freed. This causes the shrink_control to
skip calling into i915.ko to release pages, despite the GPU holding onto
most of the physical pages in its active lists.
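
In other words, the core shrinker only invokes scan_objects() in whole batches. Below is a minimal sketch of that batching, assuming a paraphrase of the era's shrink_slab() loop in mm/vmscan.c rather than the verbatim kernel code (shrink_slab_sketch is a hypothetical name):

#include <linux/shrinker.h>

/* Hypothetical, simplified rendition of the core shrinker loop. */
static unsigned long shrink_slab_sketch(struct shrinker *shrinker,
                                        struct shrink_control *sc)
{
        /* A shrinker->batch of 0 falls back to SHRINK_BATCH (128). */
        unsigned long batch_size = shrinker->batch ? : SHRINK_BATCH;
        unsigned long total_scan = shrinker->count_objects(shrinker, sc);
        unsigned long freed = 0;

        /* scan_objects() is only called in whole batches, so a count
         * below batch_size means i915 is never asked to free pages,
         * even when its bound/active lists hold most of memory. */
        while (total_scan >= batch_size) {
                sc->nr_to_scan = batch_size;
                freed += shrinker->scan_objects(shrinker, sc);
                total_scan -= batch_size;
        }

        return freed;
}

Counting bound and active objects therefore keeps the reported total above batch_size under pressure, so the loop actually reaches i915's scan callback.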

References: https://bugs.freedesktop.org/show_bug.cgi?id=72742
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Robert Beckett <robert.beckett@intel.com>
Reviewed-by: Rafael Barbalho <rafael.barbalho@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 0820baf3
@@ -1741,8 +1741,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	intel_power_domains_remove(dev_priv);
 	drm_vblank_cleanup(dev);
 out_gem_unload:
-	if (dev_priv->mm.inactive_shrinker.scan_objects)
-		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+	if (dev_priv->mm.shrinker.scan_objects)
+		unregister_shrinker(&dev_priv->mm.shrinker);
 
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
@@ -1793,8 +1793,8 @@ int i915_driver_unload(struct drm_device *dev)
 	i915_teardown_sysfs(dev);
 
-	if (dev_priv->mm.inactive_shrinker.scan_objects)
-		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+	if (dev_priv->mm.shrinker.scan_objects)
+		unregister_shrinker(&dev_priv->mm.shrinker);
 
 	io_mapping_free(dev_priv->gtt.mappable);
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
...
@@ -1057,7 +1057,7 @@ struct i915_gem_mm {
 	/** PPGTT used for aliasing the PPGTT with the GTT */
 	struct i915_hw_ppgtt *aliasing_ppgtt;
 
-	struct shrinker inactive_shrinker;
+	struct shrinker shrinker;
 	bool shrinker_no_lock_stealing;
 
 	/** LRU list of objects with fence regs on them. */
...
@@ -57,9 +57,9 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable);
-static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
 					     struct shrink_control *sc);
-static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
 					    struct shrink_control *sc);
 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
@@ -4739,10 +4739,10 @@ i915_gem_load(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
-	dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
-	dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
-	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
-	register_shrinker(&dev_priv->mm.inactive_shrinker);
+	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
+	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+	register_shrinker(&dev_priv->mm.shrinker);
 }
 
 /*
@@ -5001,13 +5001,23 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	int count = 0;
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (drm_mm_node_allocated(&vma->node))
+			count++;
+
+	return count;
+}
+
 static unsigned long
-i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
+i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
-		container_of(shrinker,
-			     struct drm_i915_private,
-			     mm.inactive_shrinker);
+		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_i915_gem_object *obj;
 	bool unlock = true;
@@ -5029,10 +5039,8 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 		count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (obj->active)
-			continue;
-
-		if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
+		if (!i915_gem_obj_is_pinned(obj) &&
+		    obj->pages_pin_count == num_vma_bound(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 	}
@@ -5105,12 +5113,10 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 }
 
 static unsigned long
-i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
-		container_of(shrinker,
-			     struct drm_i915_private,
-			     mm.inactive_shrinker);
+		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	struct drm_device *dev = dev_priv->dev;
 	unsigned long freed;
 	bool unlock = true;
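
The behavioural change sits on the count side: bound and even active objects are no longer skipped outright. Instead, an object now counts as shrinkable when the only references pinning its pages come from its own bindings, which the shrinker can drop by unbinding. A hedged restatement of that predicate (i915_gem_object_is_shrinkable_sketch is a hypothetical helper, not part of the commit):

/* Hypothetical helper restating the new count predicate: each bound VMA
 * holds one pages_pin_count reference, so equality means nothing but the
 * bindings pin the pages, and unbinding would make them freeable. */
static bool i915_gem_object_is_shrinkable_sketch(struct drm_i915_gem_object *obj)
{
        return !i915_gem_obj_is_pinned(obj) &&
               obj->pages_pin_count == num_vma_bound(obj);
}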