Commit 21ab4e74 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Objects on the unbound list may still have an active reference

Due to the lazy retirement semantics, even though we have unbound an
object, it may still hold onto an active reference. So in the debug code,
play safe.

v2: Export i915_gem_shrink() rather than opencoding it.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 344c5bbc
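
Playing safe here means pinning the object with a local reference before dropping its pages: with lazy retirement, an object that has already been unbound may still hold an active reference that put_pages()/unbind can release under us. A minimal sketch of the pattern (the helper name shrink_object_safely is hypothetical; the real logic lives in i915_gem_shrink() in the diff below):

/*
 * Hypothetical helper illustrating the "play safe" rule: hold our
 * own reference across the page drop so that retiring the last
 * active reference inside i915_gem_object_put_pages() cannot free
 * the object while we are still using it.
 */
static void shrink_object_safely(struct drm_i915_gem_object *obj)
{
	/* Pin the object: being on the unbound list is not enough to
	 * keep it alive once its pages (and any lingering active
	 * reference) are released. */
	drm_gem_object_reference(&obj->base);

	i915_gem_object_put_pages(obj);

	/* Only now may the object actually be freed. */
	drm_gem_object_unreference(&obj->base);
}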
drivers/gpu/drm/i915/i915_debugfs.c

@@ -3826,7 +3826,6 @@ i915_drop_caches_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj, *next;
 	int ret;
 
 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -3846,36 +3845,11 @@ i915_drop_caches_set(void *data, u64 val)
 	if (val & (DROP_RETIRE | DROP_ACTIVE))
 		i915_gem_retire_requests(dev);
 
-	if (val & DROP_BOUND) {
-		list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
-					 global_list) {
-			struct i915_vma *vma, *v;
-
-			ret = 0;
-			drm_gem_object_reference(&obj->base);
-			list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) {
-				if (vma->pin_count)
-					continue;
-
-				ret = i915_vma_unbind(vma);
-				if (ret)
-					break;
-			}
-			drm_gem_object_unreference(&obj->base);
-			if (ret)
-				goto unlock;
-		}
-	}
+	if (val & DROP_BOUND)
+		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
 
-	if (val & DROP_UNBOUND) {
-		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
-					 global_list)
-			if (obj->pages_pin_count == 0) {
-				ret = i915_gem_object_put_pages(obj);
-				if (ret)
-					goto unlock;
-			}
-	}
+	if (val & DROP_UNBOUND)
+		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
 
 unlock:
 	mutex_unlock(&dev->struct_mutex);
drivers/gpu/drm/i915/i915_drv.h

@@ -2368,6 +2368,12 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+			      long target,
+			      unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
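
With the declaration exported, callers choose what a scan may touch through the flag mask. A minimal sketch of a hypothetical two-pass caller (the target of 128 pages is illustrative), mirroring the shrinker change further down:

/* Hypothetical caller: reclaim cheap purgeable pages first, then
 * widen the scan to everything if the target was not met. */
unsigned long freed;

freed = i915_gem_shrink(dev_priv, 128,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_PURGEABLE);
if (freed < 128)
	freed += i915_gem_shrink(dev_priv, 128 - freed,
				 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);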
drivers/gpu/drm/i915/i915_gem.c

@@ -60,7 +60,6 @@ static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
 static int i915_gem_shrinker_oom(struct notifier_block *nb,
 				 unsigned long event,
 				 void *ptr);
-static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1741,7 +1740,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	 * offsets on purgeable objects by truncating it and marking it purged,
 	 * which prevents userspace from ever using that object again.
 	 */
-	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+	i915_gem_shrink(dev_priv,
+			obj->base.size >> PAGE_SHIFT,
+			I915_SHRINK_BOUND |
+			I915_SHRINK_UNBOUND |
+			I915_SHRINK_PURGEABLE);
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
 		goto out;
@@ -1938,12 +1941,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static unsigned long
-__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
-		  bool purgeable_only)
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+		long target, unsigned flags)
 {
-	struct list_head still_in_list;
-	struct drm_i915_gem_object *obj;
+	const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
 	unsigned long count = 0;
 
 	/*
@@ -1965,62 +1967,68 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 	 * dev->struct_mutex and so we won't ever be able to observe an
 	 * object on the bound_list with a reference count equals 0.
 	 */
-	INIT_LIST_HEAD(&still_in_list);
-	while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-		obj = list_first_entry(&dev_priv->mm.unbound_list,
-				       typeof(*obj), global_list);
-		list_move_tail(&obj->global_list, &still_in_list);
+	if (flags & I915_SHRINK_UNBOUND) {
+		struct list_head still_in_list;
 
-		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-			continue;
+		INIT_LIST_HEAD(&still_in_list);
+		while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+			struct drm_i915_gem_object *obj;
 
-		drm_gem_object_reference(&obj->base);
+			obj = list_first_entry(&dev_priv->mm.unbound_list,
+					       typeof(*obj), global_list);
+			list_move_tail(&obj->global_list, &still_in_list);
 
-		if (i915_gem_object_put_pages(obj) == 0)
-			count += obj->base.size >> PAGE_SHIFT;
+			if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+				continue;
 
-		drm_gem_object_unreference(&obj->base);
+			drm_gem_object_reference(&obj->base);
+
+			if (i915_gem_object_put_pages(obj) == 0)
+				count += obj->base.size >> PAGE_SHIFT;
+
+			drm_gem_object_unreference(&obj->base);
+		}
+		list_splice(&still_in_list, &dev_priv->mm.unbound_list);
 	}
-	list_splice(&still_in_list, &dev_priv->mm.unbound_list);
 
-	INIT_LIST_HEAD(&still_in_list);
-	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
-		struct i915_vma *vma, *v;
+	if (flags & I915_SHRINK_BOUND) {
+		struct list_head still_in_list;
 
-		obj = list_first_entry(&dev_priv->mm.bound_list,
-				       typeof(*obj), global_list);
-		list_move_tail(&obj->global_list, &still_in_list);
+		INIT_LIST_HEAD(&still_in_list);
+		while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+			struct drm_i915_gem_object *obj;
+			struct i915_vma *vma, *v;
 
-		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-			continue;
+			obj = list_first_entry(&dev_priv->mm.bound_list,
+					       typeof(*obj), global_list);
+			list_move_tail(&obj->global_list, &still_in_list);
 
-		drm_gem_object_reference(&obj->base);
+			if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+				continue;
 
-		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
-			if (i915_vma_unbind(vma))
-				break;
+			drm_gem_object_reference(&obj->base);
 
-		if (i915_gem_object_put_pages(obj) == 0)
-			count += obj->base.size >> PAGE_SHIFT;
+			list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+				if (i915_vma_unbind(vma))
+					break;
 
-		drm_gem_object_unreference(&obj->base);
+			if (i915_gem_object_put_pages(obj) == 0)
+				count += obj->base.size >> PAGE_SHIFT;
+
+			drm_gem_object_unreference(&obj->base);
+		}
+		list_splice(&still_in_list, &dev_priv->mm.bound_list);
 	}
-	list_splice(&still_in_list, &dev_priv->mm.bound_list);
 
 	return count;
 }
 
 static unsigned long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
-{
-	return __i915_gem_shrink(dev_priv, target, true);
-}
-
-static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
 	i915_gem_evict_everything(dev_priv->dev);
-	return __i915_gem_shrink(dev_priv, LONG_MAX, false);
+	return i915_gem_shrink(dev_priv, LONG_MAX,
+			       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 }
@@ -2067,7 +2075,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	for (i = 0; i < page_count; i++) {
 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		if (IS_ERR(page)) {
-			i915_gem_purge(dev_priv, page_count);
+			i915_gem_shrink(dev_priv,
+					page_count,
+					I915_SHRINK_BOUND |
+					I915_SHRINK_UNBOUND |
+					I915_SHRINK_PURGEABLE);
 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		}
 		if (IS_ERR(page)) {
@@ -5261,11 +5273,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	if (!i915_gem_shrinker_lock(dev, &unlock))
 		return SHRINK_STOP;
 
-	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+	freed = i915_gem_shrink(dev_priv,
+				sc->nr_to_scan,
+				I915_SHRINK_BOUND |
+				I915_SHRINK_UNBOUND |
+				I915_SHRINK_PURGEABLE);
 	if (freed < sc->nr_to_scan)
-		freed += __i915_gem_shrink(dev_priv,
-					   sc->nr_to_scan - freed,
-					   false);
+		freed += i915_gem_shrink(dev_priv,
+					 sc->nr_to_scan - freed,
+					 I915_SHRINK_BOUND |
+					 I915_SHRINK_UNBOUND);
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
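
Note the resulting two-pass policy in i915_gem_shrinker_scan(): cheap purgeable pages are reclaimed first, and only if that falls short of sc->nr_to_scan does a second i915_gem_shrink() call widen the scan to non-purgeable objects. i915_gem_shrink_all() keeps its old behaviour, now spelled as I915_SHRINK_BOUND | I915_SHRINK_UNBOUND with no purgeable restriction.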