Commit df069367 authored by Chris Wilson, committed by Rodrigo Vivi

drm/i915: Protect i915_active iterators from the shrinker

If we allocate while iterating the rbtree of active nodes, we may hit
the shrinker and so retire the i915_active, reaping the rbtree. Modifying
the rbtree as we iterate is not good behaviour, so acquire the
i915_active first to keep the tree intact whenever we allocate.

Fixes: a42375af ("drm/i915: Release the active tracker tree upon idling")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190208134704.23039-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
(cherry picked from commit 312c4ba1)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 08f68752
@@ -163,17 +163,25 @@ int i915_active_ref(struct i915_active *ref,
 		    struct i915_request *rq)
 {
 	struct i915_active_request *active;
+	int err = 0;
+
+	/* Prevent reaping in case we malloc/wait while building the tree */
+	i915_active_acquire(ref);
 
 	active = active_instance(ref, timeline);
-	if (IS_ERR(active))
-		return PTR_ERR(active);
+	if (IS_ERR(active)) {
+		err = PTR_ERR(active);
+		goto out;
+	}
 
 	if (!i915_active_request_isset(active))
 		ref->count++;
 	__i915_active_request_set(active, rq);
 
 	GEM_BUG_ON(!ref->count);
-	return 0;
+out:
+	i915_active_release(ref);
+	return err;
 }
 
 bool i915_active_acquire(struct i915_active *ref)
@@ -223,19 +231,25 @@ int i915_request_await_active_request(struct i915_request *rq,
 int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
 {
 	struct active_node *it, *n;
-	int ret;
-
-	ret = i915_request_await_active_request(rq, &ref->last);
-	if (ret)
-		return ret;
+	int err = 0;
+
+	/* await allocates and so we need to avoid hitting the shrinker */
+	if (i915_active_acquire(ref))
+		goto out; /* was idle */
+
+	err = i915_request_await_active_request(rq, &ref->last);
+	if (err)
+		goto out;
 
 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-		ret = i915_request_await_active_request(rq, &it->base);
-		if (ret)
-			return ret;
+		err = i915_request_await_active_request(rq, &it->base);
+		if (err)
+			goto out;
 	}
 
-	return 0;
+out:
+	i915_active_release(ref);
+	return err;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
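To make the pattern concrete outside the driver, here is a minimal, self-contained userspace sketch of the same acquire/release discipline. It is a model, not the kernel code: the tracker and node types and every function name (tracker_acquire, tracker_release, tracker_ref, tracker_walk) are invented for illustration, a singly linked list stands in for the rbtree, and plain malloc()/free() stand in for the shrinker-visible allocations.

/* toy_active.c -- illustrative model of the fix above, not the i915 API.
 * The pin count plays the role of i915_active's count: while it is held,
 * "retire" (standing in for the shrinker reaping the tree) cannot free
 * the node list, so it is safe to allocate while walking or growing it.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int timeline;
};

struct tracker {
	int count;          /* pin count; 0 means idle and reapable */
	struct node *nodes; /* stand-in for the i915_active rbtree */
};

/* Returns nonzero if the tracker was idle before this acquire,
 * mirroring the bool returned by i915_active_acquire(). */
static int tracker_acquire(struct tracker *t)
{
	return !t->count++;
}

static void tracker_reap(struct tracker *t)
{
	struct node *n, *next;

	for (n = t->nodes; n; n = next) {
		next = n->next;
		free(n);
	}
	t->nodes = NULL;
}

/* The final release retires the tracker and reaps every node --
 * exactly what must not happen in the middle of an iteration. */
static void tracker_release(struct tracker *t)
{
	if (!--t->count)
		tracker_reap(t);
}

/* Like i915_active_ref() after the fix: pin first, because the
 * allocation could otherwise trigger a retire underneath us. */
static int tracker_ref(struct tracker *t, int timeline)
{
	struct node *n;
	int err = 0;

	tracker_acquire(t); /* keep the list intact while we malloc */

	n = malloc(sizeof(*n));
	if (!n) {
		err = -1;
		goto out;
	}
	n->timeline = timeline;
	n->next = t->nodes;
	t->nodes = n;
out:
	tracker_release(t);
	return err;
}

/* Like i915_request_await_active() after the fix: skip the walk
 * entirely when the tracker was already idle. */
static void tracker_walk(struct tracker *t)
{
	struct node *n;

	if (tracker_acquire(t))
		goto out; /* was idle: nothing to visit */

	for (n = t->nodes; n; n = n->next)
		printf("visiting timeline %d\n", n->timeline);
out:
	tracker_release(t);
}

int main(void)
{
	struct tracker t = { .count = 1 }; /* born pinned, like a held ref */

	tracker_ref(&t, 1);
	tracker_ref(&t, 2);
	tracker_walk(&t);
	tracker_release(&t); /* drops to zero: the list is reaped */
	printf("nodes after final release: %s\n", t.nodes ? "leaked" : "none");
	return 0;
}

Note how both entry points bracket their work with acquire/release rather than taking a lock: the cost is one increment on the fast path, and the final release is the single place where the tree may be torn down.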