Commit 274cbf20 authored by Chris Wilson

drm/i915: Push the i915_active.retire into a worker

As we need to use a mutex to serialise i915_active activation
(because we want to allow the callback to sleep), we need to push the
i915_active.retire into a worker callback in case we need to retire
from an atomic context.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-5-chris@chris-wilson.co.uk
parent 2850748e
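
The trick behind the new annotations is worth spelling out: __i915_active_call forces the retire callback to at least 4-byte alignment, so its two low address bits are always zero, and i915_active_may_sleep() packs a "may sleep" flag into those bits with ptr_pack_bits(), which __i915_active_init() later strips off again with ptr_unpack_bits() and turns into I915_ACTIVE_RETIRE_SLEEPS. Below is a minimal stand-alone sketch of that pointer-packing idea; the helper names pack_retire()/unpack_retire() and the surrounding program are illustrative only, not i915 code.

/*
 * Stand-alone sketch of the pointer-packing used by __i915_active_call /
 * i915_active_may_sleep(): align the callback so its two low address bits
 * are free, then carry a flag in those bits alongside the pointer.
 */
#include <stdint.h>
#include <stdio.h>

#define MAY_SLEEP 0x1u			/* models I915_ACTIVE_MAY_SLEEP */

typedef void (*retire_fn)(void *ref);

/* models __i915_active_call: guarantee the two low pointer bits are zero */
__attribute__((aligned(4)))
static void my_retire(void *ref)
{
	(void)ref;
	puts("retired");
}

/* models ptr_pack_bits(fn, flags, 2) */
static retire_fn pack_retire(retire_fn fn, unsigned int flags)
{
	return (retire_fn)((uintptr_t)fn | (flags & 0x3u));
}

/* models ptr_unpack_bits(fn, &bits, 2) */
static retire_fn unpack_retire(retire_fn packed, unsigned int *flags)
{
	*flags = (uintptr_t)packed & 0x3u;
	return (retire_fn)((uintptr_t)packed & ~(uintptr_t)0x3u);
}

int main(void)
{
	unsigned int flags;
	retire_fn fn = unpack_retire(pack_retire(my_retire, MAY_SLEEP), &flags);

	if (flags & MAY_SLEEP)
		puts("defer retire to a worker");	/* queue_work() in the driver */
	fn(NULL);
	return 0;
}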
@@ -206,6 +206,7 @@ static int frontbuffer_active(struct i915_active *ref)
 	return 0;
 }
 
+__i915_active_call
 static void frontbuffer_retire(struct i915_active *ref)
 {
 	struct intel_frontbuffer *front =
@@ -257,7 +258,8 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
 	kref_init(&front->ref);
 	atomic_set(&front->bits, 0);
 	i915_active_init(i915, &front->write,
-			 frontbuffer_active, frontbuffer_retire);
+			 frontbuffer_active,
+			 i915_active_may_sleep(frontbuffer_retire));
 
 	spin_lock(&i915->fb_tracking.lock);
 	if (obj->frontbuffer) {
......
@@ -848,6 +848,7 @@ struct context_barrier_task {
 	void *data;
 };
 
+__i915_active_call
 static void cb_retire(struct i915_active *base)
 {
 	struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
......
@@ -138,6 +138,7 @@ static void __context_unpin_state(struct i915_vma *vma)
 	__i915_vma_unpin(vma);
 }
 
+__i915_active_call
 static void __intel_context_retire(struct i915_active *active)
 {
 	struct intel_context *ce = container_of(active, typeof(*ce), active);
@@ -150,6 +151,7 @@ static void __intel_context_retire(struct i915_active *active)
 	intel_timeline_unpin(ce->timeline);
 	intel_ring_unpin(ce->ring);
 
 	intel_context_put(ce);
 }
......
@@ -61,6 +61,7 @@ static int pool_active(struct i915_active *ref)
 	return 0;
 }
 
+__i915_active_call
 static void pool_retire(struct i915_active *ref)
 {
 	struct intel_engine_pool_node *node =
......
@@ -136,6 +136,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 	kfree(cl);
 }
 
+__i915_active_call
 static void __cacheline_retire(struct i915_active *active)
 {
 	struct intel_timeline_cacheline *cl =
......
@@ -132,6 +132,7 @@ __active_retire(struct i915_active *ref)
 	bool retire = false;
 
 	lockdep_assert_held(&ref->mutex);
+	GEM_BUG_ON(i915_active_is_idle(ref));
 
 	/* return the unused nodes to our slabcache -- flushing the allocator */
 	if (atomic_dec_and_test(&ref->count)) {
@@ -157,6 +158,19 @@ __active_retire(struct i915_active *ref)
 		ref->retire(ref);
 }
 
+static void
+active_work(struct work_struct *wrk)
+{
+	struct i915_active *ref = container_of(wrk, typeof(*ref), work);
+
+	GEM_BUG_ON(!atomic_read(&ref->count));
+	if (atomic_add_unless(&ref->count, -1, 1))
+		return;
+
+	mutex_lock(&ref->mutex);
+	__active_retire(ref);
+}
+
 static void
 active_retire(struct i915_active *ref)
 {
@@ -164,8 +178,13 @@ active_retire(struct i915_active *ref)
 	if (atomic_add_unless(&ref->count, -1, 1))
 		return;
 
-	/* One active may be flushed from inside the acquire of another */
-	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+	/* If we are inside interrupt context (fence signaling), defer */
+	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS ||
+	    !mutex_trylock(&ref->mutex)) {
+		queue_work(system_unbound_wq, &ref->work);
+		return;
+	}
+
 	__active_retire(ref);
 }
@@ -240,12 +259,16 @@ void __i915_active_init(struct drm_i915_private *i915,
 			void (*retire)(struct i915_active *ref),
 			struct lock_class_key *key)
 {
+	unsigned long bits;
+
 	debug_active_init(ref);
 
 	ref->i915 = i915;
 	ref->flags = 0;
 	ref->active = active;
-	ref->retire = retire;
+	ref->retire = ptr_unpack_bits(retire, &bits, 2);
+	if (bits & I915_ACTIVE_MAY_SLEEP)
+		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
 
 	ref->excl = NULL;
 	ref->tree = RB_ROOT;
@@ -253,6 +276,7 @@ void __i915_active_init(struct drm_i915_private *i915,
 	init_llist_head(&ref->preallocated_barriers);
 	atomic_set(&ref->count, 0);
 	__mutex_init(&ref->mutex, "i915_active", key);
+	INIT_WORK(&ref->work, active_work);
 }
 
 static bool ____active_del_barrier(struct i915_active *ref,
@@ -504,6 +528,7 @@ int i915_active_wait(struct i915_active *ref)
 	if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE))
 		return -EINTR;
 
+	flush_work(&ref->work);
 	if (!i915_active_is_idle(ref))
 		return -EBUSY;
@@ -544,8 +569,9 @@ int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
 void i915_active_fini(struct i915_active *ref)
 {
 	debug_active_fini(ref);
-	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
 	GEM_BUG_ON(atomic_read(&ref->count));
+	GEM_BUG_ON(work_pending(&ref->work));
+	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
 	mutex_destroy(&ref->mutex);
 }
 #endif
......
@@ -13,6 +13,9 @@
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
 #include <linux/rcupdate.h>
+#include <linux/workqueue.h>
+
+#include "i915_utils.h"
 
 struct drm_i915_private;
 struct i915_active_request;
@@ -44,6 +47,11 @@ struct i915_active_request {
 
 struct active_node;
 
+#define I915_ACTIVE_MAY_SLEEP BIT(0)
+
+#define __i915_active_call __aligned(4)
+#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
+
 struct i915_active {
 	struct drm_i915_private *i915;
@@ -57,11 +65,14 @@ struct i915_active {
 	struct dma_fence_cb excl_cb;
 
 	unsigned long flags;
-#define I915_ACTIVE_GRAB_BIT 0
+#define I915_ACTIVE_RETIRE_SLEEPS BIT(0)
+#define I915_ACTIVE_GRAB_BIT 1
 
 	int (*active)(struct i915_active *ref);
 	void (*retire)(struct i915_active *ref);
 
+	struct work_struct work;
+
 	struct llist_head preallocated_barriers;
 };
......
@@ -91,6 +91,7 @@ static int __i915_vma_active(struct i915_active *ref)
 	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
 }
 
+__i915_active_call
 static void __i915_vma_retire(struct i915_active *ref)
 {
 	i915_vma_put(active_to_vma(ref));
@@ -1152,6 +1153,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
 		return -EBUSY;
 	}
 
+	GEM_BUG_ON(i915_vma_is_active(vma));
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;
......
@@ -121,7 +121,7 @@ __live_active_setup(struct drm_i915_private *i915)
 	}
 
 	i915_active_release(&active->base);
-	if (active->retired && count) {
+	if (READ_ONCE(active->retired) && count) {
 		pr_err("i915_active retired before submission!\n");
 		err = -EINVAL;
 	}
@@ -161,7 +161,7 @@ static int live_active_wait(void *arg)
 	}
 
 	i915_active_wait(&active->base);
-	if (!active->retired) {
+	if (!READ_ONCE(active->retired)) {
 		pr_err("i915_active not retired after waiting!\n");
 		err = -EINVAL;
 	}
@@ -200,7 +200,7 @@ static int live_active_retire(void *arg)
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		err = -EIO;
 
-	if (!active->retired) {
+	if (!READ_ONCE(active->retired)) {
 		pr_err("i915_active not retired after flushing!\n");
 		err = -EINVAL;
 	}
......