Commit f130b712 authored by Chris Wilson

drm/i915: Serialise read/write of the barrier's engine

We use the request pointer inside the i915_active_node as the indicator
of the barrier's status; we mark it as used during
i915_request_add_active_barriers(), and search for an available barrier
in reuse_idle_barrier(). That check must be carefully serialised to
ensure that the pointer we read back really is the barrier's engine
and not just a random stale pointer. (Along the other reuse path, we
are fully serialised by the
timeline->mutex.) The acquisition of the barrier itself is ordered through
the strong memory barrier in llist_del_all().
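
As an illustration of that pairing (not part of the patch), below is a
minimal userspace C11 sketch of the same publish/observe ordering. All
names here are hypothetical: "request" stands in for node->base.request,
"engine" for node->base.link.prev, and the release/acquire fences for
smp_wmb()/smp_rmb().

#include <stdatomic.h>
#include <stddef.h>

#define BARRIER ((void *)1)	/* sentinel: slot is still an idle barrier */

struct slot {
	_Atomic(void *) request;	/* cf. node->base.request */
	_Atomic(void *) engine;		/* cf. node->base.link.prev */
};

/* Writer, cf. i915_request_add_active_barriers(): publish the request
 * before the engine pointer is clobbered by the list insertion. */
void mark_used(struct slot *s, void *rq)
{
	atomic_store_explicit(&s->request, rq, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	atomic_store_explicit(&s->engine, NULL, memory_order_relaxed);
}

/* Reader, cf. reuse_idle_barrier(): sample the engine first, then check
 * the barrier status. If the slot still looks like a barrier, the engine
 * we sampled cannot be the overwritten value, since the writer's two
 * stores are ordered by the release fence. */
void *try_claim(struct slot *s)
{
	void *engine = atomic_load_explicit(&s->engine, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */
	if (atomic_load_explicit(&s->request, memory_order_relaxed) != BARRIER)
		return NULL;	/* already consumed for a request */

	return engine;
}

In the patch itself the final claim is still racy and is settled by the
strong barrier in llist_del_all(); the fence pair only guarantees that a
node which still looks like a barrier yields a genuine engine pointer
rather than a stale list pointer.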

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111397
Fixes: d8af05ff ("drm/i915: Allow sharing the idle-barrier from other kernel requests")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190813200905.11369-1-chris@chris-wilson.co.uk
parent b26496ae
drivers/gpu/drm/i915/i915_active.c
@@ -52,11 +52,17 @@ static inline struct llist_node *barrier_to_ll(struct active_node *node)
 	return (struct llist_node *)&node->base.link;
 }
 
+static inline struct intel_engine_cs *
+__barrier_to_engine(struct active_node *node)
+{
+	return (struct intel_engine_cs *)READ_ONCE(node->base.link.prev);
+}
+
 static inline struct intel_engine_cs *
 barrier_to_engine(struct active_node *node)
 {
 	GEM_BUG_ON(!is_barrier(&node->base));
-	return (struct intel_engine_cs *)node->base.link.prev;
+	return __barrier_to_engine(node);
 }
 
 static inline struct active_node *barrier_from_ll(struct llist_node *x)
@@ -239,10 +245,11 @@ void __i915_active_init(struct drm_i915_private *i915,
 	__mutex_init(&ref->mutex, "i915_active", key);
 }
 
-static bool __active_del_barrier(struct i915_active *ref,
-				 struct active_node *node)
+static bool ____active_del_barrier(struct i915_active *ref,
+				   struct active_node *node,
+				   struct intel_engine_cs *engine)
+
 {
-	struct intel_engine_cs *engine = barrier_to_engine(node);
 	struct llist_node *head = NULL, *tail = NULL;
 	struct llist_node *pos, *next;
 
@@ -280,6 +287,12 @@ static bool __active_del_barrier(struct i915_active *ref,
 	return !node;
 }
 
+static bool
+__active_del_barrier(struct i915_active *ref, struct active_node *node)
+{
+	return ____active_del_barrier(ref, node, barrier_to_engine(node));
+}
+
 int i915_active_ref(struct i915_active *ref,
 		    u64 timeline,
 		    struct i915_request *rq)
@@ -517,6 +530,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 	for (p = prev; p; p = rb_next(p)) {
 		struct active_node *node =
 			rb_entry(p, struct active_node, node);
+		struct intel_engine_cs *engine;
 
 		if (node->timeline > idx)
 			break;
@@ -534,7 +548,10 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 		 * the barrier before we claim it, so we have to check
 		 * for success.
 		 */
-		if (is_barrier(&node->base) && __active_del_barrier(ref, node))
+		engine = __barrier_to_engine(node);
+		smp_rmb(); /* serialise with add_active_barriers */
+		if (is_barrier(&node->base) &&
+		    ____active_del_barrier(ref, node, engine))
 			goto match;
 	}
 
@@ -674,6 +691,7 @@ void i915_request_add_active_barriers(struct i915_request *rq)
 	 */
 	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
 		RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq);
+		smp_wmb(); /* serialise with reuse_idle_barrier */
 		list_add_tail((struct list_head *)node, &rq->active_list);
 	}
 }