Commit 74ceefd1 authored by Daniel Vetter, committed by Maarten Lankhorst

drm/i915: use might_lock_nested in get_pages annotation

So strictly speaking the existing annotation is also ok, because we
have a chain of

obj->mm.lock#I915_MM_GET_PAGES -> fs_reclaim -> obj->mm.lock

(the shrinker cannot get at an object while we're in get_pages, hence
this is safe). But it's confusing, so try to take the right subclass
of the lock.

This does a bit reduce our lockdep based checking, but then it's also
less fragile, in case we ever change the nesting around.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191104173720.2696-3-daniel.vetter@ffwll.ch
parent e692b402
...@@ -271,10 +271,27 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, ...@@ -271,10 +271,27 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj); int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
I915_MM_NORMAL = 0,
/*
* Only used by struct_mutex, when called "recursively" from
 * direct-reclaim-esque. Safe because there is only ever one
 * struct_mutex in the entire system.
*/
I915_MM_SHRINKER = 1,
/*
* Used for obj->mm.lock when allocating pages. Safe because the object
* isn't yet on any LRU, and therefore the shrinker can't deadlock on
* it. As soon as the object has pages, obj->mm.lock nests within
* fs_reclaim.
*/
I915_MM_GET_PAGES = 1,
};
static inline int __must_check static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{ {
might_lock(&obj->mm.lock); might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
return 0; return 0;
...@@ -317,23 +334,6 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) ...@@ -317,23 +334,6 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_unpin_pages(obj); __i915_gem_object_unpin_pages(obj);
} }
enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
I915_MM_NORMAL = 0,
/*
* Only used by struct_mutex, when called "recursively" from
 * direct-reclaim-esque. Safe because there is only ever one
 * struct_mutex in the entire system.
*/
I915_MM_SHRINKER = 1,
/*
* Used for obj->mm.lock when allocating pages. Safe because the object
* isn't yet on any LRU, and therefore the shrinker can't deadlock on
* it. As soon as the object has pages, obj->mm.lock nests within
* fs_reclaim.
*/
I915_MM_GET_PAGES = 1,
};
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj); int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj); void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj); void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment