Commit b1e3177b authored by Chris Wilson

drm/i915: Coordinate i915_active with its own mutex

Forgo the struct_mutex serialisation for i915_active, and interpose its
own mutex handling for active/retire.

This is a multi-layered sleight-of-hand. First, we had to ensure that no
active/retire callbacks accidentally inverted the mutex ordering rules,
nor assumed that they were themselves serialised by struct_mutex. More
challenging, though, are the rules for updating elements of the active
rbtree. Instead of the whole i915_active now being serialised by
struct_mutex, allocations/rotations of the tree are serialised by the
i915_active.mutex and individual nodes are serialised by the caller
using the i915_timeline.mutex (we need to use nested spinlocks to
interact with the dma_fence callback lists).

The pain point here is that instead of a single mutex around execbuf, we
now have to take a mutex for each active tracker (one per vma, context,
etc.) and a couple of spinlocks for each fence update. The improvement in
fine-grained locking, allowing for multiple concurrent clients
(eventually!), should be worth it in typical loads.
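
As a rough illustration only (not part of the patch), the calling pattern
that the conversions below follow looks something like the sketch here; the
helper name is hypothetical, the i915_active_*() calls are the ones used in
the diff, and error handling is trimmed:

/* Hypothetical sketch of the new locking pattern; not taken from the patch. */
static int example_track_request(struct i915_active *ref,
				 struct intel_timeline *tl,
				 struct i915_request *rq)
{
	int err;

	/* Allocation/rotation of the rbtree is serialised by i915_active.mutex. */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	/* The individual node is serialised by the caller's timeline mutex. */
	lockdep_assert_held(&tl->mutex);
	err = i915_active_ref(ref, tl, &rq->fence);

	i915_active_release(ref);
	return err;
}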

v2: Add some comments that barely elucidate anything :(
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-6-chris@chris-wilson.co.uk
parent 274cbf20
@@ -257,7 +257,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
 	front->obj = obj;
 	kref_init(&front->ref);
 	atomic_set(&front->bits, 0);
-	i915_active_init(i915, &front->write,
+	i915_active_init(&front->write,
 			 frontbuffer_active,
 			 i915_active_may_sleep(frontbuffer_retire));
...
@@ -1360,8 +1360,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
 	overlay->contrast = 75;
 	overlay->saturation = 146;
 
-	i915_active_init(dev_priv,
-			 &overlay->last_flip,
+	i915_active_init(&overlay->last_flip,
 			 NULL, intel_overlay_last_flip_retire);
 
 	ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
...
@@ -868,20 +868,18 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 				void (*task)(void *data),
 				void *data)
 {
-	struct drm_i915_private *i915 = ctx->i915;
 	struct context_barrier_task *cb;
 	struct i915_gem_engines_iter it;
 	struct intel_context *ce;
 	int err = 0;
 
-	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(!task);
 
 	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
 	if (!cb)
 		return -ENOMEM;
 
-	i915_active_init(i915, &cb->base, NULL, cb_retire);
+	i915_active_init(&cb->base, NULL, cb_retire);
 	err = i915_active_acquire(&cb->base);
 	if (err) {
 		kfree(cb);
...
@@ -8,6 +8,7 @@
 #define __I915_GEM_OBJECT_TYPES_H__
 
 #include <drm/drm_gem.h>
+#include <uapi/drm/i915_drm.h>
 
 #include "i915_active.h"
 #include "i915_selftest.h"
...
@@ -16,14 +16,11 @@ static void call_idle_barriers(struct intel_engine_cs *engine)
 	struct llist_node *node, *next;
 
 	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
-		struct i915_active_request *active =
+		struct dma_fence_cb *cb =
 			container_of((struct list_head *)node,
-				     typeof(*active), link);
+				     typeof(*cb), node);
 
-		INIT_LIST_HEAD(&active->link);
-		RCU_INIT_POINTER(active->request, NULL);
-		active->retire(active, NULL);
+		cb->func(NULL, cb);
 	}
 }
...
@@ -240,7 +240,7 @@ intel_context_init(struct intel_context *ce,
 
 	mutex_init(&ce->pin_mutex);
 
-	i915_active_init(ctx->i915, &ce->active,
+	i915_active_init(&ce->active,
 			 __intel_context_active, __intel_context_retire);
 }
 
@@ -307,7 +307,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 		return err;
 
 	/* Queue this switch after current activity by this context. */
-	err = i915_active_request_set(&tl->last_request, rq);
+	err = i915_active_fence_set(&tl->last_request, rq);
 	mutex_unlock(&tl->mutex);
 	if (err)
 		return err;
...
@@ -95,7 +95,7 @@ node_create(struct intel_engine_pool *pool, size_t sz)
 		return ERR_PTR(-ENOMEM);
 
 	node->pool = pool;
-	i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
+	i915_active_init(&node->active, pool_active, pool_retire);
 
 	obj = i915_gem_object_create_internal(engine->i915, sz);
 	if (IS_ERR(obj)) {
...
@@ -844,10 +844,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 	 */
 	spin_lock_irqsave(&timelines->lock, flags);
 	list_for_each_entry(tl, &timelines->active_list, link) {
-		struct i915_request *rq;
+		struct dma_fence *fence;
 
-		rq = i915_active_request_get_unlocked(&tl->last_request);
-		if (!rq)
+		fence = i915_active_fence_get(&tl->last_request);
+		if (!fence)
 			continue;
 
 		spin_unlock_irqrestore(&timelines->lock, flags);
@@ -859,8 +859,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
 		 * in the worst case.
 		 */
-		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
-		i915_request_put(rq);
+		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
+		dma_fence_put(fence);
 
 		/* Restart iteration after droping lock */
 		spin_lock_irqsave(&timelines->lock, flags);
...
@@ -178,8 +178,7 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
 	cl->hwsp = hwsp;
 	cl->vaddr = page_pack_bits(vaddr, cacheline);
 
-	i915_active_init(hwsp->gt->i915, &cl->active,
-			 __cacheline_active, __cacheline_retire);
+	i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);
 
 	return cl;
 }
@@ -255,7 +254,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
 
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
+	INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
 	INIT_LIST_HEAD(&timeline->requests);
 
 	i915_syncmap_init(&timeline->sync);
@@ -443,7 +442,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 	 * free it after the current request is retired, which ensures that
 	 * all writes into the cacheline from previous requests are complete.
 	 */
-	err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq);
+	err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence);
 	if (err)
 		goto err_cacheline;
...
@@ -58,12 +58,13 @@ struct intel_timeline {
 	 */
 	struct list_head requests;
 
-	/* Contains an RCU guarded pointer to the last request. No reference is
+	/*
+	 * Contains an RCU guarded pointer to the last request. No reference is
 	 * held to the request, users must carefully acquire a reference to
-	 * the request using i915_active_request_get_request_rcu(), or hold the
-	 * struct_mutex.
+	 * the request using i915_active_fence_get(), or manage the RCU
+	 * protection themselves (cf the i915_active_fence API).
 	 */
-	struct i915_active_request last_request;
+	struct i915_active_fence last_request;
 
 	/**
 	 * We track the most recent seqno that we wait on in every context so
...
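
A minimal sketch (illustrative only, not from the patch) of the reader
pattern described in the comment above, using only calls that appear
elsewhere in this diff; the function name is hypothetical:

/* Hypothetical example: wait on a timeline's last request, if any. */
static void example_wait_last_request(struct intel_timeline *tl)
{
	struct dma_fence *fence;

	fence = i915_active_fence_get(&tl->last_request);
	if (!fence)
		return; /* timeline is idle */

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
}
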
@@ -47,24 +47,20 @@ static int context_sync(struct intel_context *ce)
 	mutex_lock(&tl->mutex);
 	do {
-		struct i915_request *rq;
+		struct dma_fence *fence;
 		long timeout;
 
-		rcu_read_lock();
-		rq = rcu_dereference(tl->last_request.request);
-		if (rq)
-			rq = i915_request_get_rcu(rq);
-		rcu_read_unlock();
-		if (!rq)
+		fence = i915_active_fence_get(&tl->last_request);
+		if (!fence)
 			break;
 
-		timeout = i915_request_wait(rq, 0, HZ / 10);
+		timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
 		if (timeout < 0)
 			err = timeout;
 		else
-			i915_request_retire_upto(rq);
+			i915_request_retire_upto(to_request(fence));
 
-		i915_request_put(rq);
+		dma_fence_put(fence);
 	} while (!err);
 	mutex_unlock(&tl->mutex);
...
@@ -1172,9 +1172,13 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
 	if (!rq)
 		return NULL;
 
-	INIT_LIST_HEAD(&rq->active_list);
 	rq->engine = engine;
 
+	spin_lock_init(&rq->lock);
+	INIT_LIST_HEAD(&rq->fence.cb_list);
+	rq->fence.lock = &rq->lock;
+	rq->fence.ops = &i915_fence_ops;
+
 	i915_sched_node_init(&rq->sched);
 
 	/* mark this request as permanently incomplete */
@@ -1267,8 +1271,8 @@ static int live_suppress_wait_preempt(void *arg)
 			}
 
 			/* Disable NEWCLIENT promotion */
-			__i915_active_request_set(&i915_request_timeline(rq[i])->last_request,
-						  dummy);
+			__i915_active_fence_set(&i915_request_timeline(rq[i])->last_request,
+						&dummy->fence);
 
 			i915_request_add(rq[i]);
 		}
...
@@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
 
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
+	INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
 	INIT_LIST_HEAD(&timeline->requests);
 
 	i915_syncmap_init(&timeline->sync);
...
@@ -385,11 +385,8 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct i915_request *rq;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
 	if (workload->req)
 		return 0;
...
@@ -17,17 +17,9 @@
 
 #include "i915_utils.h"
 
-struct drm_i915_private;
-struct i915_active_request;
-struct i915_request;
-
-typedef void (*i915_active_retire_fn)(struct i915_active_request *,
-				      struct i915_request *);
-
-struct i915_active_request {
-	struct i915_request __rcu *request;
-	struct list_head link;
-	i915_active_retire_fn retire;
+struct i915_active_fence {
+	struct dma_fence __rcu *fence;
+	struct dma_fence_cb cb;
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 	/*
 	 * Incorporeal!
@@ -53,20 +45,17 @@ struct active_node;
 #define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
 
 struct i915_active {
-	struct drm_i915_private *i915;
+	atomic_t count;
+	struct mutex mutex;
 
 	struct active_node *cache;
 	struct rb_root tree;
-	struct mutex mutex;
-	atomic_t count;
 
 	/* Preallocated "exclusive" node */
-	struct dma_fence __rcu *excl;
-	struct dma_fence_cb excl_cb;
+	struct i915_active_fence excl;
 
 	unsigned long flags;
 #define I915_ACTIVE_RETIRE_SLEEPS BIT(0)
-#define I915_ACTIVE_GRAB_BIT 1
 
 	int (*active)(struct i915_active *ref);
 	void (*retire)(struct i915_active *ref);
...
@@ -892,28 +892,38 @@ wait_for_timelines(struct intel_gt *gt, unsigned int wait, long timeout)
 	spin_lock_irqsave(&timelines->lock, flags);
 	list_for_each_entry(tl, &timelines->active_list, link) {
-		struct i915_request *rq;
+		struct dma_fence *fence;
 
-		rq = i915_active_request_get_unlocked(&tl->last_request);
-		if (!rq)
+		fence = i915_active_fence_get(&tl->last_request);
+		if (!fence)
 			continue;
 
 		spin_unlock_irqrestore(&timelines->lock, flags);
 
-		/*
-		 * "Race-to-idle".
-		 *
-		 * Switching to the kernel context is often used a synchronous
-		 * step prior to idling, e.g. in suspend for flushing all
-		 * current operations to memory before sleeping. These we
-		 * want to complete as quickly as possible to avoid prolonged
-		 * stalls, so allow the gpu to boost to maximum clocks.
-		 */
-		if (wait & I915_WAIT_FOR_IDLE_BOOST)
-			gen6_rps_boost(rq);
+		if (!dma_fence_is_i915(fence)) {
+			timeout = dma_fence_wait_timeout(fence,
+							 flags & I915_WAIT_INTERRUPTIBLE,
+							 timeout);
+		} else {
+			struct i915_request *rq = to_request(fence);
+
+			/*
+			 * "Race-to-idle".
+			 *
+			 * Switching to the kernel context is often used as
+			 * a synchronous step prior to idling, e.g. in suspend
+			 * for flushing all current operations to memory before
+			 * sleeping. These we want to complete as quickly as
+			 * possible to avoid prolonged stalls, so allow the gpu
+			 * to boost to maximum clocks.
+			 */
+			if (flags & I915_WAIT_FOR_IDLE_BOOST)
+				gen6_rps_boost(rq);
+
+			timeout = i915_request_wait(rq, flags, timeout);
+		}
 
-		timeout = i915_request_wait(rq, wait, timeout);
-		i915_request_put(rq);
+		dma_fence_put(fence);
 
 		if (timeout < 0)
 			return timeout;
...
@@ -1861,7 +1861,6 @@ static const struct i915_vma_ops pd_vma_ops = {
 
 static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 {
-	struct drm_i915_private *i915 = ppgtt->base.vm.i915;
 	struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
 	struct i915_vma *vma;
 
@@ -1872,7 +1871,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 	if (!vma)
 		return ERR_PTR(-ENOMEM);
 
-	i915_active_init(i915, &vma->active, NULL, NULL);
+	i915_active_init(&vma->active, NULL, NULL);
 	mutex_init(&vma->pages_mutex);
 	vma->vm = i915_vm_get(&ggtt->vm);
...
@@ -1299,7 +1299,7 @@ capture_vma(struct capture_vma *next,
 	if (!c)
 		return next;
 
-	if (!i915_active_trygrab(&vma->active)) {
+	if (!i915_active_acquire_if_busy(&vma->active)) {
 		kfree(c);
 		return next;
 	}
@@ -1439,7 +1439,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
 			*this->slot =
 				i915_error_object_create(i915, vma, compress);
 
-			i915_active_ungrab(&vma->active);
+			i915_active_release(&vma->active);
 			i915_vma_put(vma);
 
 			capture = this->next;
...
@@ -218,8 +218,6 @@ static void remove_from_engine(struct i915_request *rq)
 
 static bool i915_request_retire(struct i915_request *rq)
 {
-	struct i915_active_request *active, *next;
-
 	if (!i915_request_completed(rq))
 		return false;
@@ -244,35 +242,6 @@ static bool i915_request_retire(struct i915_request *rq)
 		   &i915_request_timeline(rq)->requests));
 	rq->ring->head = rq->postfix;
 
-	/*
-	 * Walk through the active list, calling retire on each. This allows
-	 * objects to track their GPU activity and mark themselves as idle
-	 * when their *last* active request is completed (updating state
-	 * tracking lists for eviction, active references for GEM, etc).
-	 *
-	 * As the ->retire() may free the node, we decouple it first and
-	 * pass along the auxiliary information (to avoid dereferencing
-	 * the node after the callback).
-	 */
-	list_for_each_entry_safe(active, next, &rq->active_list, link) {
-		/*
-		 * In microbenchmarks or focusing upon time inside the kernel,
-		 * we may spend an inordinate amount of time simply handling
-		 * the retirement of requests and processing their callbacks.
-		 * Of which, this loop itself is particularly hot due to the
-		 * cache misses when jumping around the list of
-		 * i915_active_request. So we try to keep this loop as
-		 * streamlined as possible and also prefetch the next
-		 * i915_active_request to try and hide the likely cache miss.
-		 */
-		prefetchw(next);
-
-		INIT_LIST_HEAD(&active->link);
-		RCU_INIT_POINTER(active->request, NULL);
-
-		active->retire(active, rq);
-	}
-
 	local_irq_disable();
 
 	/*
@@ -704,7 +673,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq->flags = 0;
 	rq->execution_mask = ALL_ENGINES;
 
-	INIT_LIST_HEAD(&rq->active_list);
 	INIT_LIST_HEAD(&rq->execute_cb);
 
 	/*
@@ -743,7 +711,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	ce->ring->emit = rq->head;
 
 	/* Make sure we didn't add ourselves to external state before freeing */
-	GEM_BUG_ON(!list_empty(&rq->active_list));
 	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
 	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
@@ -1174,8 +1141,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 	 * precludes optimising to use semaphores serialisation of a single
 	 * timeline across engines.
 	 */
-	prev = rcu_dereference_protected(timeline->last_request.request,
-					 lockdep_is_held(&timeline->mutex));
+	prev = to_request(__i915_active_fence_set(&timeline->last_request,
+						  &rq->fence));
 	if (prev && !i915_request_completed(prev)) {
 		if (is_power_of_2(prev->engine->mask | rq->engine->mask))
 			i915_sw_fence_await_sw_fence(&rq->submit,
@@ -1200,7 +1167,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 	 * us, the timeline will hold its seqno which is later than ours.
 	 */
 	GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
-	__i915_active_request_set(&timeline->last_request, rq);
 
 	return prev;
 }
...
@@ -211,7 +211,6 @@ struct i915_request {
 	 * on the active_list (of their final request).
 	 */
 	struct i915_capture_list *capture_list;
-	struct list_head active_list;
 
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
...
@@ -120,8 +120,7 @@ vma_create(struct drm_i915_gem_object *obj,
 	vma->size = obj->base.size;
 	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
-	i915_active_init(vm->i915, &vma->active,
-			 __i915_vma_active, __i915_vma_retire);
+	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
 
 	/* Declare ourselves safe for use inside shrinkers */
 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -1148,6 +1147,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
 	if (ret)
 		return ret;
 
+	GEM_BUG_ON(i915_vma_is_active(vma));
 	if (i915_vma_is_pinned(vma)) {
 		vma_print_allocator(vma, "is pinned");
 		return -EBUSY;
...
@@ -68,7 +68,7 @@ static struct live_active *__live_alloc(struct drm_i915_private *i915)
 		return NULL;
 
 	kref_init(&active->ref);
-	i915_active_init(i915, &active->base, __live_active, __live_retire);
+	i915_active_init(&active->base, __live_active, __live_retire);
 
 	return active;
 }
@@ -146,19 +146,13 @@ static int live_active_wait(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct live_active *active;
-	intel_wakeref_t wakeref;
 	int err = 0;
 
 	/* Check that we get a callback when requests retire upon waiting */
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	active = __live_active_setup(i915);
-	if (IS_ERR(active)) {
-		err = PTR_ERR(active);
-		goto err;
-	}
+	if (IS_ERR(active))
+		return PTR_ERR(active);
 
 	i915_active_wait(&active->base);
 	if (!READ_ONCE(active->retired)) {
@@ -168,11 +162,9 @@ static int live_active_wait(void *arg)
 
 	__live_put(active);
 
+	mutex_lock(&i915->drm.struct_mutex);
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		err = -EIO;
-err:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	return err;
@@ -182,23 +174,19 @@ static int live_active_retire(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct live_active *active;
-	intel_wakeref_t wakeref;
 	int err = 0;
 
 	/* Check that we get a callback when requests are indirectly retired */
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	active = __live_active_setup(i915);
-	if (IS_ERR(active)) {
-		err = PTR_ERR(active);
-		goto err;
-	}
+	if (IS_ERR(active))
+		return PTR_ERR(active);
 
 	/* waits for & retires all requests */
+	mutex_lock(&i915->drm.struct_mutex);
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		err = -EIO;
+	mutex_unlock(&i915->drm.struct_mutex);
 
 	if (!READ_ONCE(active->retired)) {
 		pr_err("i915_active not retired after flushing!\n");
@@ -207,10 +195,6 @@ static int live_active_retire(void *arg)
 
 	__live_put(active);
 
-err:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	return err;
 }
...