Commit 9db0c5ca authored by Chris Wilson

drm/i915: Stop retiring along engine

We no longer track the execution order along the engine, and so we no longer
need to enforce retirement ordering along the engine.
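
In caller terms, i915_request_retire() now checks completion itself and
reports whether it made progress, so callers simply retire along their own
ring timeline until they hit a busy request, roughly (mirroring the
ring_retire_requests() hunk below):

	static void ring_retire_requests(struct intel_ring *ring)
	{
		struct i915_request *rq, *rn;

		/*
		 * Requests on a ring timeline complete in order, so stop at
		 * the first request that i915_request_retire() refuses, i.e.
		 * the first one that has not yet completed.
		 */
		list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
			if (!i915_request_retire(rq))
				break;
	}
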
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190614164606.15633-2-chris@chris-wilson.co.uk
parent ce476c80
@@ -183,72 +183,23 @@ static void free_capture_list(struct i915_request *request)
 	}
 }
 
-static void __retire_engine_request(struct intel_engine_cs *engine,
-				    struct i915_request *rq)
-{
-	GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n",
-		  __func__, engine->name,
-		  rq->fence.context, rq->fence.seqno,
-		  hwsp_seqno(rq));
-
-	GEM_BUG_ON(!i915_request_completed(rq));
-
-	local_irq_disable();
-
-	spin_lock(&engine->timeline.lock);
-	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
-	list_del_init(&rq->link);
-	spin_unlock(&engine->timeline.lock);
-
-	spin_lock(&rq->lock);
-	i915_request_mark_complete(rq);
-	if (!i915_request_signaled(rq))
-		dma_fence_signal_locked(&rq->fence);
-	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
-		i915_request_cancel_breadcrumb(rq);
-	if (rq->waitboost) {
-		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
-		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
-	}
-	spin_unlock(&rq->lock);
-
-	local_irq_enable();
-}
-
-static void __retire_engine_upto(struct intel_engine_cs *engine,
-				 struct i915_request *rq)
-{
-	struct i915_request *tmp;
-
-	if (list_empty(&rq->link))
-		return;
-
-	do {
-		tmp = list_first_entry(&engine->timeline.requests,
-				       typeof(*tmp), link);
-
-		GEM_BUG_ON(tmp->engine != engine);
-		__retire_engine_request(engine, tmp);
-	} while (tmp != rq);
-}
-
-static void i915_request_retire(struct i915_request *request)
+static bool i915_request_retire(struct i915_request *rq)
 {
 	struct i915_active_request *active, *next;
 
-	GEM_TRACE("%s fence %llx:%lld, current %d\n",
-		  request->engine->name,
-		  request->fence.context, request->fence.seqno,
-		  hwsp_seqno(request));
+	lockdep_assert_held(&rq->i915->drm.struct_mutex);
+	if (!i915_request_completed(rq))
+		return false;
 
-	lockdep_assert_held(&request->i915->drm.struct_mutex);
-	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
-	GEM_BUG_ON(!i915_request_completed(request));
+	GEM_TRACE("%s fence %llx:%lld, current %d\n",
+		  rq->engine->name,
+		  rq->fence.context, rq->fence.seqno,
+		  hwsp_seqno(rq));
 
-	trace_i915_request_retire(request);
+	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+	trace_i915_request_retire(rq);
 
-	advance_ring(request);
-	free_capture_list(request);
+	advance_ring(rq);
 
 	/*
 	 * Walk through the active list, calling retire on each. This allows
@@ -260,7 +211,7 @@ static void i915_request_retire(struct i915_request *request)
 	 * pass along the auxiliary information (to avoid dereferencing
 	 * the node after the callback).
 	 */
-	list_for_each_entry_safe(active, next, &request->active_list, link) {
+	list_for_each_entry_safe(active, next, &rq->active_list, link) {
 		/*
 		 * In microbenchmarks or focusing upon time inside the kernel,
 		 * we may spend an inordinate amount of time simply handling
@@ -276,18 +227,39 @@ static void i915_request_retire(struct i915_request *request)
 		INIT_LIST_HEAD(&active->link);
 		RCU_INIT_POINTER(active->request, NULL);
 
-		active->retire(active, request);
+		active->retire(active, rq);
+	}
+
+	local_irq_disable();
+
+	spin_lock(&rq->engine->timeline.lock);
+	list_del(&rq->link);
+	spin_unlock(&rq->engine->timeline.lock);
+
+	spin_lock(&rq->lock);
+	i915_request_mark_complete(rq);
+	if (!i915_request_signaled(rq))
+		dma_fence_signal_locked(&rq->fence);
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
+		i915_request_cancel_breadcrumb(rq);
+	if (rq->waitboost) {
+		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
+		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
 	}
+	spin_unlock(&rq->lock);
+
+	local_irq_enable();
 
-	i915_request_remove_from_client(request);
+	intel_context_exit(rq->hw_context);
+	intel_context_unpin(rq->hw_context);
 
-	__retire_engine_upto(request->engine, request);
+	i915_request_remove_from_client(rq);
+	free_capture_list(rq);
 
-	intel_context_exit(request->hw_context);
-	intel_context_unpin(request->hw_context);
+	i915_sched_node_fini(&rq->sched);
+	i915_request_put(rq);
 
-	i915_sched_node_fini(&request->sched);
-	i915_request_put(request);
+	return true;
 }
@@ -309,9 +281,7 @@ void i915_request_retire_upto(struct i915_request *rq)
 	do {
 		tmp = list_first_entry(&ring->request_list,
 				       typeof(*tmp), ring_link);
-
-		i915_request_retire(tmp);
-	} while (tmp != rq);
+	} while (i915_request_retire(tmp) && tmp != rq);
 }
 
 static void irq_execute_cb(struct irq_work *wrk)
@@ -600,12 +570,9 @@ static void ring_retire_requests(struct intel_ring *ring)
 {
 	struct i915_request *rq, *rn;
 
-	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
-		if (!i915_request_completed(rq))
+	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
+		if (!i915_request_retire(rq))
 			break;
-
-		i915_request_retire(rq);
-	}
 }
 
 static noinline struct i915_request *
@@ -620,6 +587,15 @@ request_alloc_slow(struct intel_context *ce, gfp_t gfp)
 	if (!gfpflags_allow_blocking(gfp))
 		goto out;
 
+	/* Move our oldest request to the slab-cache (if not in use!) */
+	rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
+	i915_request_retire(rq);
+
+	rq = kmem_cache_alloc(global.slab_requests,
+			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+	if (rq)
+		return rq;
+
 	/* Ratelimit ourselves to prevent oom from malicious clients */
 	rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
 	cond_synchronize_rcu(rq->rcustate);
@@ -777,8 +753,7 @@ i915_request_create(struct intel_context *ce)
 	/* Move our oldest request to the slab-cache (if not in use!) */
 	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
-	if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
-	    i915_request_completed(rq))
+	if (!list_is_last(&rq->ring_link, &ce->ring->request_list))
 		i915_request_retire(rq);
 
 	intel_context_enter(ce);