Commit 38813767 authored by Chris Wilson's avatar Chris Wilson

drm/i915/selftests: Flush all active callbacks

Flushing the outer i915_active is not enough, as we need the barrier to
be applied across all the active dma_fence callbacks. So we must
serialise with each outstanding fence.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=112096
References: f79520bb ("drm/i915/selftests: Synchronize checking active status with retirement")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191101181022.25633-1-chris@chris-wilson.co.uk
parent 9278bbb6
...@@ -53,9 +53,7 @@ static struct pulse *pulse_create(void) ...@@ -53,9 +53,7 @@ static struct pulse *pulse_create(void)
static void pulse_unlock_wait(struct pulse *p) static void pulse_unlock_wait(struct pulse *p)
{ {
mutex_lock(&p->active.mutex); i915_active_unlock_wait(&p->active);
mutex_unlock(&p->active.mutex);
flush_work(&p->active.work);
} }
static int __live_idle_pulse(struct intel_engine_cs *engine, static int __live_idle_pulse(struct intel_engine_cs *engine,
......
...@@ -215,5 +215,6 @@ void i915_active_acquire_barrier(struct i915_active *ref); ...@@ -215,5 +215,6 @@ void i915_active_acquire_barrier(struct i915_active *ref);
void i915_request_add_active_barriers(struct i915_request *rq); void i915_request_add_active_barriers(struct i915_request *rq);
void i915_active_print(struct i915_active *ref, struct drm_printer *m); void i915_active_print(struct i915_active *ref, struct drm_printer *m);
void i915_active_unlock_wait(struct i915_active *ref);
#endif /* _I915_ACTIVE_H_ */ #endif /* _I915_ACTIVE_H_ */
...@@ -250,3 +250,36 @@ void i915_active_print(struct i915_active *ref, struct drm_printer *m) ...@@ -250,3 +250,36 @@ void i915_active_print(struct i915_active *ref, struct drm_printer *m)
i915_active_release(ref); i915_active_release(ref);
} }
} }
/*
 * spin_unlock_wait() - serialise with any current holder of @lock.
 *
 * Acquiring and immediately dropping the lock acts as a barrier: by the
 * time we own it, every critical section that was in flight under @lock
 * has completed.  No state is inspected or modified.
 *
 * NOTE(review): the _irq variant is presumably used because the caller
 * passes dma_fence->lock, which can be taken from interrupt context —
 * confirm against the fence callback paths.
 */
static void spin_unlock_wait(spinlock_t *lock)
{
	spin_lock_irq(lock);
	spin_unlock_irq(lock);
}
/*
 * i915_active_unlock_wait() - flush all outstanding callbacks on @ref.
 *
 * Serialises, in order, with:
 *  1. every active dma_fence callback tracked in @ref's node tree, by
 *     bouncing each fence's spinlock (see spin_unlock_wait());
 *  2. the retire callback, by bouncing ref->mutex;
 *  3. the retire work item, in case retirement was deferred to a worker.
 *
 * Only waits; does not change the state of @ref.
 */
void i915_active_unlock_wait(struct i915_active *ref)
{
	/*
	 * Only walk the tree if @ref is still busy; taking the reference
	 * keeps @ref (and its tree) alive across the iteration.
	 */
	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		/*
		 * Nodes' fence pointers may be cleared concurrently on
		 * retirement, hence the RCU read lock + rcu_dereference()
		 * below rather than a plain load.
		 */
		rcu_read_lock();
		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			struct dma_fence *f;

			/* Wait for all active callbacks */
			f = rcu_dereference(it->base.fence);
			if (f)
				spin_unlock_wait(f->lock);
		}
		rcu_read_unlock();

		i915_active_release(ref);
	}

	/* And wait for the retire callback */
	mutex_lock(&ref->mutex);
	mutex_unlock(&ref->mutex);

	/* ... which may have been on a thread instead */
	flush_work(&ref->work);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment