Commit 442dbc5c authored by Chris Wilson

drm/i915: Make exclusive awaits on i915_active optional

Later use will require asynchronous waits on the active timelines, but
will not utilize an async wait on the exclusive channel. Make the await
on the exclusive fence explicit in the selection flags.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200406155840.1728-1-chris@chris-wilson.co.uk
parent 39d571d1
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -549,14 +549,15 @@ static int await_active(struct i915_active *ref,
 {
 	int err = 0;
 
-	/* We must always wait for the exclusive fence! */
-	if (rcu_access_pointer(ref->excl.fence)) {
+	if (flags & I915_ACTIVE_AWAIT_EXCL &&
+	    rcu_access_pointer(ref->excl.fence)) {
 		err = __await_active(&ref->excl, fn, arg);
 		if (err)
 			return err;
 	}
 
-	if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+	if (flags & I915_ACTIVE_AWAIT_ACTIVE &&
+	    i915_active_acquire_if_busy(ref)) {
 		struct active_node *it, *n;
 
 		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
...
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -193,7 +193,8 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence,
 int i915_request_await_active(struct i915_request *rq,
 			      struct i915_active *ref,
 			      unsigned int flags);
-#define I915_ACTIVE_AWAIT_ALL BIT(0)
+#define I915_ACTIVE_AWAIT_EXCL BIT(0)
+#define I915_ACTIVE_AWAIT_ACTIVE BIT(1)
 
 int i915_active_acquire(struct i915_active *ref);
 bool i915_active_acquire_if_busy(struct i915_active *ref);
...
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1948,7 +1948,7 @@ emit_oa_config(struct i915_perf_stream *stream,
 	if (!IS_ERR_OR_NULL(active)) {
 		/* After all individual context modifications */
 		err = i915_request_await_active(rq, active,
-						I915_ACTIVE_AWAIT_ALL);
+						I915_ACTIVE_AWAIT_ACTIVE);
 		if (err)
 			goto err_add_request;
...
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1167,7 +1167,8 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 
 	/* Wait for the vma to be bound before we start! */
-	err = i915_request_await_active(rq, &vma->active, 0);
+	err = i915_request_await_active(rq, &vma->active,
+					I915_ACTIVE_AWAIT_EXCL);
 	if (err)
 		return err;
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment