Commit a3258dbd authored by Thomas Hellström, committed by Daniel Vetter

drm/i915: Prepare for obj->mm.lock removal, v2.

Stolen objects need to take the object lock, and put_pages() may be
called when the refcount drops to 0; ensure all calls are handled
correctly.

Changes since v1:
- Rebase on top of upstream changes.

Idea-from: Thomas Hellström <thomas.hellstrom@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-33-maarten.lankhorst@linux.intel.com
parent 74827b53
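
The commit message hinges on a lifetime argument: once an object's refcount
has dropped to 0, concurrent lookups (which go through kref_get_unless_zero())
can no longer reach it, so the final put_pages() may legitimately run without
obj->mm.lock held. Below is a minimal, self-contained sketch of that pattern;
the demo_obj type and its helpers are hypothetical illustrations of the
kref_get_unless_zero() idiom, not i915 code:

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref ref;
	void *payload;
};

/* Lookup side: only succeeds while the refcount is still non-zero. */
static struct demo_obj *demo_obj_get(struct demo_obj *obj)
{
	return kref_get_unless_zero(&obj->ref) ? obj : NULL;
}

static void demo_obj_release(struct kref *ref)
{
	struct demo_obj *obj = container_of(ref, struct demo_obj, ref);

	/*
	 * The refcount is 0 here, so no lookup can succeed anymore and
	 * no other thread can reach obj: the payload can be torn down
	 * without holding any per-object lock.
	 */
	kfree(obj->payload);
	kfree(obj);
}

static void demo_obj_put(struct demo_obj *obj)
{
	kref_put(&obj->ref, demo_obj_release);
}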
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -118,6 +118,20 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
 
 #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
 
+/*
+ * If more than one potential simultaneous locker, assert held.
+ */
+static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
+{
+	/*
+	 * Note mm list lookup is protected by
+	 * kref_get_unless_zero().
+	 */
+	if (IS_ENABLED(CONFIG_LOCKDEP) &&
+	    kref_read(&obj->base.refcount) > 0)
+		lockdep_assert_held(&obj->mm.lock);
+}
+
 static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
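
The new assert only arms the lockdep check while the object is still
reachable by other threads. A hedged usage sketch (the caller below is
hypothetical, not part of this patch):

static void demo_release_pages(struct drm_i915_gem_object *obj)
{
	/*
	 * Shared path (refcount > 0): the caller must hold obj->mm.lock
	 * and lockdep verifies that.  Final-put path: kref_read() is
	 * already 0, concurrent lookups via kref_get_unless_zero() all
	 * fail, and the assert is skipped at run time.
	 */
	assert_object_held_shared(obj);

	/* ... tear down obj->mm.pages here ... */
}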
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -19,7 +19,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	bool shrinkable;
 	int i;
 
-	lockdep_assert_held(&obj->mm.lock);
+	assert_object_held_shared(obj);
 
 	if (i915_gem_object_is_volatile(obj))
 		obj->mm.madv = I915_MADV_DONTNEED;
@@ -70,6 +70,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 		struct list_head *list;
 		unsigned long flags;
 
+		lockdep_assert_held(&obj->mm.lock);
 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
 
 		i915->mm.shrink_count++;
@@ -91,6 +92,8 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	int err;
 
+	assert_object_held_shared(obj);
+
 	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
 		drm_dbg(&i915->drm,
 			"Attempting to obtain a purgeable object\n");
@@ -118,6 +121,8 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
+	assert_object_held_shared(obj);
+
 	if (unlikely(!i915_gem_object_has_pages(obj))) {
 		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
@@ -145,7 +150,7 @@ void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 /* Try to discard unwanted pages */
 void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
 {
-	lockdep_assert_held(&obj->mm.lock);
+	assert_object_held_shared(obj);
 	GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
 	if (obj->ops->writeback)
@@ -176,6 +181,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
 	struct sg_table *pages;
 
+	assert_object_held_shared(obj);
+
 	pages = fetch_and_zero(&obj->mm.pages);
 	if (IS_ERR_OR_NULL(pages))
 		return pages;
@@ -203,6 +210,9 @@ int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_has_pinned_pages(obj))
 		return -EBUSY;
 
+	/* May be called by shrinker from within get_pages() (on another bo) */
+	assert_object_held_shared(obj);
+
 	i915_gem_object_release_mmap_offset(obj);
 
 	/*
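
The comment added in the hunk above encodes a real recursion: acquiring pages
for one object can allocate memory, allocation can enter direct reclaim, and
reclaim can invoke the shrinker, which releases the pages of a different
object. A hypothetical, simplified sketch of that call shape (demo_bo and its
helpers are illustrative only, not i915 code):

#include <linux/sizes.h>
#include <linux/slab.h>

struct demo_bo {
	void *pages;
};

/* Invoked from the shrinker, possibly on another bo than the one
 * currently inside demo_get_pages(). */
static void demo_put_pages(struct demo_bo *bo)
{
	kfree(bo->pages);
	bo->pages = NULL;
}

static int demo_get_pages(struct demo_bo *bo)
{
	/*
	 * A GFP_KERNEL allocation can enter direct reclaim, and reclaim
	 * can call into the shrinker, which may run demo_put_pages() on
	 * a different object while we are still in here.  Any locking
	 * assert in the put path must therefore check the object being
	 * released, not the one the original caller holds.
	 */
	bo->pages = kzalloc(SZ_64K, GFP_KERNEL);
	return bo->pages ? 0 : -ENOMEM;
}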
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -637,13 +637,15 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
 	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
 	i915_gem_object_set_cache_coherency(obj, cache_level);
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
-		return err;
+	if (WARN_ON(!i915_gem_object_trylock(obj)))
+		return -EBUSY;
 
-	i915_gem_object_init_memory_region(obj, mem);
+	err = i915_gem_object_pin_pages(obj);
+	if (!err)
+		i915_gem_object_init_memory_region(obj, mem);
+	i915_gem_object_unlock(obj);
 
-	return 0;
+	return err;
 }
 
 static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
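
One note on the stolen-memory hunk: at this point in
__i915_gem_object_create_stolen() the object has only just been created, so
no other thread can hold its lock and the trylock is expected to always
succeed; the WARN_ON documents that invariant rather than handling real
contention. A hedged sketch of the same pattern on a bare reservation object
(this assumes the dma_resv-based object lock the series converges on;
demo_init_locked is hypothetical):

#include <linux/bug.h>
#include <linux/dma-resv.h>
#include <linux/errno.h>

/* Creation path: we hold the only reference, so contention is a bug. */
static int demo_init_locked(struct dma_resv *resv)
{
	if (WARN_ON(!dma_resv_trylock(resv)))
		return -EBUSY;

	/* ... pin pages and publish the object while locked ... */

	dma_resv_unlock(resv);
	return 0;
}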