Commit 25ffd4b1 authored by Chris Wilson

drm/i915: Markup expected timeline locks for i915_active

As every i915_active_request should be serialised by a dedicated lock,
i915_active consists of a tree of locks; one for each node. Mark up the
i915_active_request with the lock that is supposed to be guarding it so
that we can verify that the serialised updates are indeed serialised.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190816121000.8507-2-chris@chris-wilson.co.uk
parent 6c69a454
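
In short: in CONFIG_DRM_I915_DEBUG_GEM builds, each i915_active_request now carries a pointer to the mutex expected to guard its updates, and every update asserts that lock is held so lockdep can catch unserialised updates. A minimal sketch of the pattern, using an illustrative tracker type rather than the driver's real structures:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    /* Illustrative stand-in for the markup added to i915_active_request. */
    struct tracker {
            void *payload;
    #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
            struct mutex *lock; /* the lock expected to guard all updates */
    #endif
    };

    static inline void tracker_init(struct tracker *t, struct mutex *lock)
    {
            t->payload = NULL;
    #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
            t->lock = lock; /* recorded once, at construction */
    #endif
    }

    static inline void tracker_update(struct tracker *t, void *payload)
    {
    #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
            lockdep_assert_held(t->lock); /* catch unserialised updates */
    #endif
            t->payload = payload;
    }
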
@@ -230,7 +230,7 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
 	if (IS_ERR(rq))
 		return rq;
 
-	err = i915_active_ref(&overlay->last_flip, rq->fence.context, rq);
+	err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
 	if (err) {
 		i915_request_add(rq);
 		return ERR_PTR(err);
...
@@ -211,7 +211,7 @@ static void clear_pages_worker(struct work_struct *work)
 	 * keep track of the GPU activity within this vma/request, and
 	 * propagate the signal from the request to w->dma.
 	 */
-	err = i915_active_ref(&vma->active, rq->fence.context, rq);
+	err = i915_active_ref(&vma->active, rq->timeline, rq);
 	if (err)
 		goto out_request;
...
@@ -908,7 +908,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 	if (emit)
 		err = emit(rq, data);
 	if (err == 0)
-		err = i915_active_ref(&cb->base, rq->fence.context, rq);
+		err = i915_active_ref(&cb->base, rq->timeline, rq);
 
 	i915_request_add(rq);
 	if (err)
...
@@ -306,10 +306,10 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 		/* Queue this switch after current activity by this context. */
 		err = i915_active_request_set(&tl->last_request, rq);
+		mutex_unlock(&tl->mutex);
 		if (err)
-			goto unlock;
+			return err;
 	}
-	lockdep_assert_held(&tl->mutex);
 
 	/*
 	 * Guarantee context image and the timeline remains pinned until the
@@ -319,12 +319,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 	 * words transfer the pinned ce object to tracked active request.
 	 */
 	GEM_BUG_ON(i915_active_is_idle(&ce->active));
-	err = i915_active_ref(&ce->active, rq->fence.context, rq);
-
-unlock:
-	if (rq->timeline != tl)
-		mutex_unlock(&tl->mutex);
-	return err;
+	return i915_active_ref(&ce->active, rq->timeline, rq);
 }
 
 struct i915_request *intel_context_create_request(struct intel_context *ce)
...
@@ -18,7 +18,7 @@ static inline int
 intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
 			      struct i915_request *rq)
 {
-	return i915_active_ref(&node->active, rq->fence.context, rq);
+	return i915_active_ref(&node->active, rq->timeline, rq);
 }
 
 static inline void
...
@@ -254,7 +254,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->last_request);
+	INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
 	INIT_LIST_HEAD(&timeline->requests);
 
 	i915_syncmap_init(&timeline->sync);
@@ -440,8 +440,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 	 * free it after the current request is retired, which ensures that
 	 * all writes into the cacheline from previous requests are complete.
 	 */
-	err = i915_active_ref(&tl->hwsp_cacheline->active,
-			      tl->fence_context, rq);
+	err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq);
 	if (err)
 		goto err_cacheline;
@@ -492,7 +491,7 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
 static int cacheline_ref(struct intel_timeline_cacheline *cl,
 			 struct i915_request *rq)
 {
-	return i915_active_ref(&cl->active, rq->fence.context, rq);
+	return i915_active_ref(&cl->active, rq->timeline, rq);
 }
 
 int intel_timeline_read_hwsp(struct i915_request *from,
...
@@ -689,7 +689,9 @@ static int live_hwsp_wrap(void *arg)
 		tl->seqno = -4u;
 
+		mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
 		err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
+		mutex_unlock(&tl->mutex);
 		if (err) {
 			i915_request_add(rq);
 			goto out;
@@ -704,7 +706,9 @@ static int live_hwsp_wrap(void *arg)
 		}
 		hwsp_seqno[0] = tl->hwsp_seqno;
 
+		mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
 		err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
+		mutex_unlock(&tl->mutex);
 		if (err) {
 			i915_request_add(rq);
 			goto out;
...
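
Why the selftest change: intel_timeline_get_seqno() now ends up in i915_active_ref(), which asserts that tl->mutex is held, so the test must take that lock itself. The nested annotation is needed because between i915_request_create() and i915_request_add() the request already holds its own timeline's mutex, and both mutexes share a lockdep class. A rough sketch of that shape (surrounding selftest details elided; assumes ce, tl and seqno set up as in the test):

    /* rq->timeline->mutex is held from request creation... */
    rq = i915_request_create(ce);

    /*
     * ...so locking a second timeline of the same lockdep class must be
     * annotated, or lockdep reports a false self-deadlock.
     */
    mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
    err = intel_timeline_get_seqno(tl, rq, &seqno);
    mutex_unlock(&tl->mutex);

    i915_request_add(rq); /* drops rq->timeline->mutex */
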
@@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->last_request);
+	INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
 	INIT_LIST_HEAD(&timeline->requests);
 
 	i915_syncmap_init(&timeline->sync);
...
@@ -169,10 +169,11 @@ node_retire(struct i915_active_request *base, struct i915_request *rq)
 }
 
 static struct i915_active_request *
-active_instance(struct i915_active *ref, u64 idx)
+active_instance(struct i915_active *ref, struct intel_timeline *tl)
 {
 	struct active_node *node, *prealloc;
 	struct rb_node **p, *parent;
+	u64 idx = tl->fence_context;
 
 	/*
 	 * We track the most recently used timeline to skip a rbtree search
@@ -211,7 +212,7 @@ active_instance(struct i915_active *ref, u64 idx)
 	}
 
 	node = prealloc;
-	i915_active_request_init(&node->base, NULL, node_retire);
+	i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire);
 	node->ref = ref;
 	node->timeline = idx;
@@ -294,18 +295,20 @@ __active_del_barrier(struct i915_active *ref, struct active_node *node)
 }
 
 int i915_active_ref(struct i915_active *ref,
-		    u64 timeline,
+		    struct intel_timeline *tl,
 		    struct i915_request *rq)
 {
 	struct i915_active_request *active;
 	int err;
 
+	lockdep_assert_held(&tl->mutex);
+
 	/* Prevent reaping in case we malloc/wait while building the tree */
 	err = i915_active_acquire(ref);
 	if (err)
 		return err;
 
-	active = active_instance(ref, timeline);
+	active = active_instance(ref, tl);
 	if (!active) {
 		err = -ENOMEM;
 		goto out;
@@ -596,6 +599,10 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 			goto unwind;
 		}
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+		node->base.lock =
+			&engine->kernel_context->timeline->mutex;
+#endif
 		RCU_INIT_POINTER(node->base.request, NULL);
 		node->base.retire = node_retire;
 		node->timeline = idx;
@@ -701,6 +708,10 @@ int i915_active_request_set(struct i915_active_request *active,
 {
 	int err;
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+	lockdep_assert_held(active->lock);
+#endif
+
 	/* Must maintain ordering wrt previous active requests */
 	err = i915_request_await_active_request(rq, active);
 	if (err)
...
@@ -58,15 +58,20 @@ void i915_active_retire_noop(struct i915_active_request *active,
  */
 static inline void
 i915_active_request_init(struct i915_active_request *active,
+			 struct mutex *lock,
 			 struct i915_request *rq,
 			 i915_active_retire_fn retire)
 {
 	RCU_INIT_POINTER(active->request, rq);
 	INIT_LIST_HEAD(&active->link);
 	active->retire = retire ?: i915_active_retire_noop;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+	active->lock = lock;
+#endif
 }
 
-#define INIT_ACTIVE_REQUEST(name) i915_active_request_init((name), NULL, NULL)
+#define INIT_ACTIVE_REQUEST(name, lock) \
+	i915_active_request_init((name), (lock), NULL, NULL)
 
 /**
  * i915_active_request_set - updates the tracker to watch the current request
@@ -81,6 +86,9 @@ static inline void
 __i915_active_request_set(struct i915_active_request *active,
 			  struct i915_request *request)
 {
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+	lockdep_assert_held(active->lock);
+#endif
 	list_move(&active->link, &request->active_list);
 	rcu_assign_pointer(active->request, request);
 }
@@ -362,7 +370,7 @@ void __i915_active_init(struct drm_i915_private *i915,
 } while (0)
 
 int i915_active_ref(struct i915_active *ref,
-		    u64 timeline,
+		    struct intel_timeline *tl,
 		    struct i915_request *rq);
 int i915_active_wait(struct i915_active *ref);
...
@@ -24,6 +24,21 @@ struct i915_active_request {
 	struct i915_request __rcu *request;
 	struct list_head link;
 	i915_active_retire_fn retire;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+	/*
+	 * Incorporeal!
+	 *
+	 * Updates to the i915_active_request must be serialised under a lock
+	 * to ensure that the timeline is ordered. Normally, this is the
+	 * timeline->mutex, but another mutex may be used so long as it is
+	 * done so consistently.
+	 *
+	 * For lockdep tracking of the above, we store the lock we intend
+	 * to always use for updates of this i915_active_request during
+	 * construction and assert that is held on every update.
+	 */
+	struct mutex *lock;
+#endif
 };
 
 struct active_node;
...
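
Given the stored lock, the caller-side contract for i915_active_ref() becomes: hold rq->timeline->mutex across the update. During normal request construction that mutex is already held, so most call sites change mechanically. A hypothetical out-of-line caller (sketch only, not a call site from this patch; error paths trimmed) would look roughly like this:

    static int track_request(struct i915_active *ref, struct i915_request *rq)
    {
            int err;

            /*
             * Not between i915_request_create() and i915_request_add(),
             * so take the timeline mutex explicitly around the update.
             */
            mutex_lock(&rq->timeline->mutex);
            err = i915_active_ref(ref, rq->timeline, rq);
            mutex_unlock(&rq->timeline->mutex);

            return err;
    }
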
@@ -903,14 +903,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
-	err = i915_active_ref(&vma->active, rq->fence.context, rq);
+	err = i915_active_ref(&vma->active, rq->timeline, rq);
 	if (unlikely(err))
 		return err;
 
 	if (flags & EXEC_OBJECT_WRITE) {
 		if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
 			i915_active_ref(&obj->frontbuffer->write,
-					rq->fence.context,
+					rq->timeline,
 					rq);
 
 		reservation_object_add_excl_fence(vma->resv, &rq->fence);
...
@@ -110,8 +110,7 @@ __live_active_setup(struct drm_i915_private *i915)
 						       submit,
 						       GFP_KERNEL);
 		if (err >= 0)
-			err = i915_active_ref(&active->base,
-					      rq->fence.context, rq);
+			err = i915_active_ref(&active->base, rq->timeline, rq);
 		i915_request_add(rq);
 		if (err) {
 			pr_err("Failed to track active ref!\n");
...