Commit 87701b4b authored by Chris Wilson

drm/i915: Only free the oldest stale object before a fresh allocation

Inspired by Tvrtko's critique of reaping the stale contexts before
allocating a new one, also limit freed-object reaping to the oldest
stale object before allocating a fresh object. Unlike contexts, objects
may have radically different sizes of backing storage, but as with
contexts, while we want to prevent starvation due to excessive freed
lists, we also do not want to delay fresh allocations for too long.
Freeing only the oldest entry on the freed-object list before each
allocation is a reasonable compromise.
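
As a userspace illustration of that compromise (a hypothetical sketch,
not the i915 code; obj_alloc() and stale_list are invented names), the
idea is simply to bound reclaim work per allocation: free at most one
deferred object, then allocate.

#include <stdlib.h>

struct obj {
	struct obj *next;	/* link on the deferred-free list */
};

static struct obj *stale_list;	/* objects waiting to be freed */

/* Reap at most one stale object, then allocate: O(1) reclaim per call. */
static struct obj *obj_alloc(void)
{
	struct obj *victim = stale_list;

	if (victim) {
		stale_list = victim->next;
		free(victim);	/* bounded work, unlike draining the whole list */
	}
	return calloc(1, sizeof(struct obj));
}

Steady allocation traffic then drains the list one entry at a time,
avoiding both unbounded list growth and a long stall from freeing
everything at once.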

v2: Only a single consumer of llist_del_first() is allowed (although
multiple llist_add() calls may still run in parallel). Unlike
i915_gem_context, i915_gem_flush_free_objects() is not itself
serialized, so we need to add our own spinlock; without it KASAN
eventually spots a use-after-free from the race on *first->next.
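
To see why the lock is needed, consider a userspace analogue of the
llist pattern: a hypothetical sketch using C11 atomics and a pthread
mutex as stand-ins for llist_add()/llist_del_first() and the new
free_lock (stale_push() and stale_pop_first() are invented names).
Pushes are lock-free and may run concurrently, but popping must read
first->next and then detach first, so two unserialized poppers can
race, one freeing the node while the other still dereferences its next
pointer.

#include <pthread.h>
#include <stdatomic.h>

struct node {
	struct node *next;
};

static _Atomic(struct node *) free_list;	/* lock-free LIFO, like llist_head */
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

/* Producers may push concurrently without the lock (cf. llist_add()). */
static void stale_push(struct node *n)
{
	n->next = atomic_load(&free_list);
	while (!atomic_compare_exchange_weak(&free_list, &n->next, n))
		;	/* CAS failure reloads n->next; just retry */
}

/*
 * Pop the first entry (cf. llist_del_first()). Reading n->next and
 * swinging the head must not race with another popper: the other
 * popper could free n while we still dereference n->next -- the
 * use-after-free KASAN reported. Hence a single consumer, enforced
 * by the lock.
 */
static struct node *stale_pop_first(void)
{
	struct node *n;

	pthread_mutex_lock(&free_lock);
	n = atomic_load(&free_list);
	while (n && !atomic_compare_exchange_weak(&free_list, &n, n->next))
		;	/* a concurrent push moved the head; retry */
	pthread_mutex_unlock(&free_lock);
	return n;
}

This mirrors the kernel rule: llist_add() needs no serialization, but
llist_del_first() callers must be mutually exclusive, hence the new
mm.free_lock around it.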
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> #v1
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171013202621.7276-8-chris@chris-wilson.co.uk
parent c5418a8b
@@ -1513,6 +1513,7 @@ struct i915_gem_mm {
 	 */
 	struct llist_head free_list;
 	struct work_struct free_work;
+	spinlock_t free_lock;
 
 	/**
 	 * Small stash of WC pages
...
@@ -4555,9 +4555,18 @@ static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 {
 	struct llist_node *freed;
 
-	freed = llist_del_all(&i915->mm.free_list);
-	if (unlikely(freed))
+	/* Free the oldest, most stale object to keep the free_list short */
+	freed = NULL;
+	if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
+		/* Only one consumer of llist_del_first() allowed */
+		spin_lock(&i915->mm.free_lock);
+		freed = llist_del_first(&i915->mm.free_list);
+		spin_unlock(&i915->mm.free_lock);
+	}
+	if (unlikely(freed)) {
+		freed->next = NULL;
 		__i915_gem_free_objects(i915, freed);
+	}
 }
 
 static void __i915_gem_free_work(struct work_struct *work)
@@ -5059,6 +5068,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 	INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
 
 	spin_lock_init(&dev_priv->mm.obj_lock);
+	spin_lock_init(&dev_priv->mm.free_lock);
 	init_llist_head(&dev_priv->mm.free_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
...