Commit cf41a8f1 authored by Maarten Lankhorst, committed by Daniel Vetter

drm/i915: Finally remove obj->mm.lock.

With all callers and selftests fixed to use ww locking, we can now
finally remove this lock.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-62-maarten.lankhorst@linux.intel.com
parent 480ae795
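For orientation, the caller-side pattern that replaces obj->mm.lock is a ww (wait/wound) transaction on the object's dma-resv lock. A minimal sketch using the i915 GEM ww helpers from this series, with error handling trimmed to the essentials:

    struct i915_gem_ww_ctx ww;
    int err;

    i915_gem_ww_ctx_init(&ww, true);   /* true: interruptible waits */
retry:
    err = i915_gem_object_lock(obj, &ww);   /* locks obj->base.resv */
    if (!err) {
        /* pin_pages() now asserts this lock instead of taking mm.lock */
        err = i915_gem_object_pin_pages(obj);
        if (!err) {
            /* ... operate on the backing pages ... */
            i915_gem_object_unpin_pages(obj);
        }
    }
    if (err == -EDEADLK) {
        err = i915_gem_ww_ctx_backoff(&ww);
        if (!err)
            goto retry;
    }
    i915_gem_ww_ctx_fini(&ww);

On -EDEADLK the transaction backs off, drops its locks, and retries, which is what makes it safe to take multiple object locks where the single mm.lock used to serialize everything.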
@@ -62,8 +62,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops,
 			  struct lock_class_key *key, unsigned flags)
 {
-	mutex_init(&obj->mm.lock);
-
 	spin_lock_init(&obj->vma.lock);
 	INIT_LIST_HEAD(&obj->vma.list);
...
@@ -129,7 +129,7 @@ static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
 	 */
 	if (IS_ENABLED(CONFIG_LOCKDEP) &&
 	    kref_read(&obj->base.refcount) > 0)
-		lockdep_assert_held(&obj->mm.lock);
+		assert_object_held(obj);
 }

 static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
@@ -358,7 +358,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline int __must_check
 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-	might_lock(&obj->mm.lock);
+	assert_object_held(obj);

 	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
 		return 0;
@@ -404,7 +404,6 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 }

 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj);
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
...
@@ -216,7 +216,6 @@ struct drm_i915_gem_object {
 		 * Protects the pages and their use. Do not use directly, but
 		 * instead go through the pin/unpin interfaces.
 		 */
-		struct mutex lock;
 		atomic_t pages_pin_count;
 		atomic_t shrink_pin;
...
@@ -70,7 +70,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 		struct list_head *list;
 		unsigned long flags;

-		lockdep_assert_held(&obj->mm.lock);
+		assert_object_held(obj);
 		spin_lock_irqsave(&i915->mm.obj_lock, flags);

 		i915->mm.shrink_count++;
@@ -117,9 +117,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
 	int err;

-	err = mutex_lock_interruptible(&obj->mm.lock);
-	if (err)
-		return err;
+	assert_object_held(obj);

 	assert_object_held_shared(obj);
@@ -128,15 +126,13 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 		err = ____i915_gem_object_get_pages(obj);
 		if (err)
-			goto unlock;
+			return err;

 		smp_mb__before_atomic();
 	}
 	atomic_inc(&obj->mm.pages_pin_count);

-unlock:
-	mutex_unlock(&obj->mm.lock);
-	return err;
+	return 0;
 }

 int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
@@ -223,7 +219,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 	return pages;
 }

-int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 {
 	struct sg_table *pages;
@@ -254,21 +250,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }

-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
-{
-	int err;
-
-	if (i915_gem_object_has_pinned_pages(obj))
-		return -EBUSY;
-
-	/* May be called by shrinker from within get_pages() (on another bo) */
-	mutex_lock(&obj->mm.lock);
-	err = __i915_gem_object_put_pages_locked(obj);
-	mutex_unlock(&obj->mm.lock);
-
-	return err;
-}
-
 /* The 'mapping' part of i915_gem_object_pin_map() below */
 static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
 				      enum i915_map_type type)
@@ -371,9 +352,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
 		return ERR_PTR(-ENXIO);

-	err = mutex_lock_interruptible(&obj->mm.lock);
-	if (err)
-		return ERR_PTR(err);
+	assert_object_held(obj);

 	pinned = !(type & I915_MAP_OVERRIDE);
 	type &= ~I915_MAP_OVERRIDE;
@@ -383,10 +362,8 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

 			err = ____i915_gem_object_get_pages(obj);
-			if (err) {
-				ptr = ERR_PTR(err);
-				goto out_unlock;
-			}
+			if (err)
+				return ERR_PTR(err);

 			smp_mb__before_atomic();
 		}
@@ -421,13 +398,11 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 		obj->mm.mapping = page_pack_bits(ptr, type);
 	}

-out_unlock:
-	mutex_unlock(&obj->mm.lock);
 	return ptr;

 err_unpin:
 	atomic_dec(&obj->mm.pages_pin_count);
-	goto out_unlock;
+	return ptr;
 }

 void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
...
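Callers that are not already inside a ww transaction use the _unlocked wrappers such as i915_gem_object_pin_map_unlocked() above, which take and drop the object lock internally. A rough usage sketch:

    void *vaddr;

    vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
    if (IS_ERR(vaddr))
        return PTR_ERR(vaddr);

    /* ... CPU access through vaddr ... */

    i915_gem_object_unpin_map(obj);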
@@ -234,40 +234,22 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 	if (err)
 		return err;

-	err = mutex_lock_interruptible(&obj->mm.lock);
-	if (err)
-		return err;
-
-	if (unlikely(!i915_gem_object_has_struct_page(obj)))
-		goto out;
-
-	if (obj->mm.madv != I915_MADV_WILLNEED) {
-		err = -EFAULT;
-		goto out;
-	}
+	if (obj->mm.madv != I915_MADV_WILLNEED)
+		return -EFAULT;

-	if (i915_gem_object_has_tiling_quirk(obj)) {
-		err = -EFAULT;
-		goto out;
-	}
+	if (i915_gem_object_has_tiling_quirk(obj))
+		return -EFAULT;

-	if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj)) {
-		err = -EBUSY;
-		goto out;
-	}
+	if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
+		return -EBUSY;

 	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
 		drm_dbg(obj->base.dev,
 			"Attempting to obtain a purgeable object\n");
-		err = -EFAULT;
-		goto out;
+		return -EFAULT;
 	}

-	err = i915_gem_object_shmem_to_phys(obj);
-
-out:
-	mutex_unlock(&obj->mm.lock);
-	return err;
+	return i915_gem_object_shmem_to_phys(obj);
 }

 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
...
@@ -116,7 +116,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
 	 */

 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-		i915_gem_shrink(i915, -1UL, NULL, ~0);
+		i915_gem_shrink(NULL, i915, -1UL, NULL, ~0);
 	i915_gem_drain_freed_objects(i915);

 	wbinvd_on_all_cpus();
...
@@ -99,7 +99,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 			goto err_sg;
 		}

-		i915_gem_shrink(i915, 2 * page_count, NULL, *s++);
+		i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

 		/*
 		 * We've tried hard to allocate the memory by reaping
...
@@ -94,7 +94,8 @@ static void try_to_writeback(struct drm_i915_gem_object *obj,
  * The number of pages of backing storage actually released.
  */
 unsigned long
-i915_gem_shrink(struct drm_i915_private *i915,
+i915_gem_shrink(struct i915_gem_ww_ctx *ww,
+		struct drm_i915_private *i915,
 		unsigned long target,
 		unsigned long *nr_scanned,
 		unsigned int shrink)
@@ -113,6 +114,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	intel_wakeref_t wakeref = 0;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
+	int err;

 	trace_i915_gem_shrink(i915, target, shrink);
@@ -200,25 +202,40 @@ i915_gem_shrink(struct drm_i915_private *i915,

 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

-			if (unsafe_drop_pages(obj, shrink) &&
-			    mutex_trylock(&obj->mm.lock)) {
+			err = 0;
+			if (unsafe_drop_pages(obj, shrink)) {
 				/* May arrive from get_pages on another bo */
-				if (!__i915_gem_object_put_pages_locked(obj)) {
+				if (!ww) {
+					if (!i915_gem_object_trylock(obj))
+						goto skip;
+				} else {
+					err = i915_gem_object_lock(obj, ww);
+					if (err)
+						goto skip;
+				}
+
+				if (!__i915_gem_object_put_pages(obj)) {
 					try_to_writeback(obj, shrink);
 					count += obj->base.size >> PAGE_SHIFT;
 				}
-				mutex_unlock(&obj->mm.lock);
+				if (!ww)
+					i915_gem_object_unlock(obj);
 			}

 			dma_resv_prune(obj->base.resv);

 			scanned += obj->base.size >> PAGE_SHIFT;
+skip:
 			i915_gem_object_put(obj);

 			spin_lock_irqsave(&i915->mm.obj_lock, flags);
+			if (err)
+				break;
 		}
 		list_splice_tail(&still_in_list, phase->list);
 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+		if (err)
+			return err;
 	}

 	if (shrink & I915_SHRINK_BOUND)
@@ -249,7 +266,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
 	unsigned long freed = 0;

 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		freed = i915_gem_shrink(i915, -1UL, NULL,
+		freed = i915_gem_shrink(NULL, i915, -1UL, NULL,
 					I915_SHRINK_BOUND |
 					I915_SHRINK_UNBOUND);
 	}
@@ -295,7 +312,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	sc->nr_scanned = 0;

-	freed = i915_gem_shrink(i915,
+	freed = i915_gem_shrink(NULL, i915,
 				sc->nr_to_scan,
 				&sc->nr_scanned,
 				I915_SHRINK_BOUND |
@@ -304,7 +321,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		intel_wakeref_t wakeref;

 		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-			freed += i915_gem_shrink(i915,
+			freed += i915_gem_shrink(NULL, i915,
 						 sc->nr_to_scan - sc->nr_scanned,
 						 &sc->nr_scanned,
 						 I915_SHRINK_ACTIVE |
@@ -329,7 +346,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	freed_pages = 0;
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-		freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
 					       I915_SHRINK_BOUND |
 					       I915_SHRINK_UNBOUND |
 					       I915_SHRINK_WRITEBACK);
@@ -367,7 +384,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 	intel_wakeref_t wakeref;

 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-		freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
 					       I915_SHRINK_BOUND |
 					       I915_SHRINK_UNBOUND |
 					       I915_SHRINK_VMAPS);
...
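The new first argument to i915_gem_shrink() lets a caller that is already inside a ww transaction pass its acquire context, so the shrinker takes victim object locks via i915_gem_object_lock(obj, ww) as part of the same transaction; every other caller passes NULL and the shrinker falls back to a per-object trylock, skipping objects it cannot lock. A sketch of the two call forms (target and flags stand in for the caller's real values):

    /* No ww context (reclaim, debugfs, OOM/vmap notifiers): per-object trylock. */
    i915_gem_shrink(NULL, i915, target, NULL, flags);

    /* Inside a ww transaction: object locks join the caller's acquire context. */
    i915_gem_shrink(ww, i915, target, NULL, flags);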
@@ -9,10 +9,12 @@
 #include <linux/bits.h>

 struct drm_i915_private;
+struct i915_gem_ww_ctx;
 struct mutex;

 /* i915_gem_shrinker.c */
-unsigned long i915_gem_shrink(struct drm_i915_private *i915,
+unsigned long i915_gem_shrink(struct i915_gem_ww_ctx *ww,
+			      struct drm_i915_private *i915,
 			      unsigned long target,
 			      unsigned long *nr_scanned,
 			      unsigned flags);
...
@@ -265,7 +265,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	 * pages to prevent them being swapped out and causing corruption
 	 * due to the change in swizzling.
 	 */
-	mutex_lock(&obj->mm.lock);
 	if (i915_gem_object_has_pages(obj) &&
 	    obj->mm.madv == I915_MADV_WILLNEED &&
 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
@@ -280,7 +279,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 			i915_gem_object_set_tiling_quirk(obj);
 		}
 	}
-	mutex_unlock(&obj->mm.lock);

 	spin_lock(&obj->vma.lock);
 	for_each_ggtt_vma(vma, obj) {
...
@@ -253,7 +253,7 @@ static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool
 	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
 		return -EBUSY;

-	mutex_lock(&obj->mm.lock);
+	assert_object_held(obj);

 	pages = __i915_gem_object_unset_pages(obj);
 	if (!IS_ERR_OR_NULL(pages))
@@ -261,7 +261,6 @@ static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool
 	if (get_pages)
 		err = ____i915_gem_object_get_pages(obj);
-	mutex_unlock(&obj->mm.lock);

 	return err;
 }
...
@@ -904,10 +904,10 @@ i915_drop_caches_set(void *data, u64 val)

 	fs_reclaim_acquire(GFP_KERNEL);
 	if (val & DROP_BOUND)
-		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
+		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

 	if (val & DROP_UNBOUND)
-		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
+		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

 	if (val & DROP_SHRINK_ALL)
 		i915_gem_shrink_all(i915);
...
@@ -980,10 +980,6 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		goto out;

-	err = mutex_lock_interruptible(&obj->mm.lock);
-	if (err)
-		goto out_ww;
-
 	if (i915_gem_object_has_pages(obj) &&
 	    i915_gem_object_is_tiled(obj) &&
 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
@@ -1026,9 +1022,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 		i915_gem_object_truncate(obj);

 	args->retained = obj->mm.madv != __I915_MADV_PURGED;
-	mutex_unlock(&obj->mm.lock);

-out_ww:
 	i915_gem_object_unlock(obj);
 out:
 	i915_gem_object_put(obj);
...
@@ -44,7 +44,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 		 * the DMA remapper, i915_gem_shrink will return 0.
 		 */
 		GEM_BUG_ON(obj->mm.pages == pages);
-	} while (i915_gem_shrink(to_i915(obj->base.dev),
+	} while (i915_gem_shrink(NULL, to_i915(obj->base.dev),
 				 obj->base.size >> PAGE_SHIFT, NULL,
 				 I915_SHRINK_BOUND |
 				 I915_SHRINK_UNBOUND));
...