Commit 164a4128 authored by Chris Wilson's avatar Chris Wilson

drm/i915/selftests: Pretty print the i915_active

If the idle_pulse fails to flush the i915_active, dump the tree to see
if that has any clues.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191031101116.19894-1-chris@chris-wilson.co.uk
parent 1db257c5
@@ -102,8 +102,12 @@ static int __live_idle_pulse(struct intel_engine_cs *engine,
 	pulse_unlock_wait(p); /* synchronize with the retirement callback */

 	if (!i915_active_is_idle(&p->active)) {
+		struct drm_printer m = drm_err_printer("pulse");
+
 		pr_err("%s: heartbeat pulse did not flush idle tasks\n",
 		       engine->name);
+		i915_active_print(&p->active, &m);
+
 		err = -EINVAL;
 		goto out;
 	}
@@ -214,4 +214,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 void i915_active_acquire_barrier(struct i915_active *ref);
 void i915_request_add_active_barriers(struct i915_request *rq);

+void i915_active_print(struct i915_active *ref, struct drm_printer *m);
+
 #endif /* _I915_ACTIVE_H_ */
@@ -205,3 +205,48 @@ int i915_active_live_selftests(struct drm_i915_private *i915)

 	return i915_subtests(tests, i915);
 }
/*
 * node_to_barrier - return the engine backing a barrier node, or NULL.
 *
 * A node flagged as a barrier can be claimed and converted into an
 * ordinary fence entry concurrently (see the "serialise with
 * add_active_barriers" note below). is_barrier() is therefore checked
 * both before and after reading the engine pointer, with smp_rmb()
 * ordering the loads, so we never return an engine for a node that was
 * consumed while we looked at it.
 *
 * NOTE(review): the exact publication/consumption protocol lives in
 * add_active_barriers() in i915_active.c — confirm against that side
 * when modifying this check.
 */
static struct intel_engine_cs *node_to_barrier(struct active_node *it)
{
struct intel_engine_cs *engine;
/* Fast path: not marked as a barrier at all, nothing to report. */
if (!is_barrier(&it->base))
return NULL;
engine = __barrier_to_engine(it);
smp_rmb(); /* serialise with add_active_barriers */
/* Re-check: the barrier may have been claimed while we read engine. */
if (!is_barrier(&it->base))
return NULL;
return engine;
}
/*
 * i915_active_print - dump the state of an i915_active for debugging.
 * @ref: the i915_active to describe
 * @m: drm_printer sink for the output
 *
 * Prints the active/retire callbacks, the reference count and whether
 * any preallocated barriers remain, then — if the i915_active is still
 * busy — walks the tracked-node tree and prints one line per barrier
 * (its engine name) or per still-set fence (its timeline id).
 * Intended for error dumps from selftests; takes no action on the ref
 * beyond an acquire-if-busy/release pair around the tree walk.
 */
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
	struct active_node *iter, *tmp;

	drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
	drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
	drm_printf(m, "\tpreallocated barriers? %s\n",
		   yesno(!llist_empty(&ref->preallocated_barriers)));

	/* Only walk the tree while we hold the activity reference. */
	if (!i915_active_acquire_if_busy(ref))
		return;

	rbtree_postorder_for_each_entry_safe(iter, tmp, &ref->tree, node) {
		struct intel_engine_cs *barrier = node_to_barrier(iter);

		if (barrier) {
			drm_printf(m, "\tbarrier: %s\n", barrier->name);
		} else if (i915_active_fence_isset(&iter->base)) {
			drm_printf(m, "\ttimeline: %llx\n", iter->timeline);
		}
	}

	i915_active_release(ref);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment