Commit c0bb487d authored by Chris Wilson

drm/i915: Only enqueue already completed requests

If we are asked to submit a completed request, just move it onto the
active-list without modifying its payload. If we try to emit the
modified payload of a completed request, we risk racing with the
ring->head update during retirement which may advance the head past our
breadcrumb and so we generate a warning for the emission being behind
the RING_HEAD.

v2: Commentary for the sneaky, shared responsibility between functions.
v3: Spelling mistakes and bonus assertion
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190923110056.15176-3-chris@chris-wilson.co.uk
parent 3231f8c0
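
Before the diff, here is a minimal, self-contained sketch of the control flow the patch establishes in __i915_request_submit(): an already-completed request is transferred onto the active list without re-emitting its payload, so nothing is written behind the advancing RING_HEAD, and the boolean return tells the caller whether the request still needs tracking. This is not the driver code; every name below is a hypothetical stand-in for the real i915 structures.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the driver's request state. */
struct sketch_request {
	bool completed;		/* i915_request_completed() analogue */
	bool on_active_list;	/* engine->active.requests membership */
	bool payload_emitted;	/* emit_fini_breadcrumb() has run */
};

static bool sketch_submit(struct sketch_request *rq)
{
	bool result = false;

	if (rq->completed)
		goto xfer;	/* completed: leave the payload untouched */

	rq->payload_emitted = true;	/* safe: request is still in flight */
	result = true;

xfer:
	rq->on_active_list = true;	/* dropped upon retiring */
	return result;	/* caller only tracks the request when true */
}

int main(void)
{
	struct sketch_request live = { .completed = false };
	struct sketch_request done = { .completed = true };
	bool tracked;

	tracked = sketch_submit(&live);
	printf("live: tracked=%d emitted=%d\n", tracked, live.payload_emitted);

	tracked = sketch_submit(&done);
	printf("done: tracked=%d emitted=%d\n", tracked, done.payload_emitted);
	return 0;
}
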
@@ -799,6 +799,17 @@ static bool can_merge_rq(const struct i915_request *prev,
 	GEM_BUG_ON(prev == next);
 	GEM_BUG_ON(!assert_priority_queue(prev, next));
 
+	/*
+	 * We do not submit known completed requests. Therefore if the next
+	 * request is already completed, we can pretend to merge it in
+	 * with the previous context (and we will skip updating the ELSP
+	 * and tracking). Thus hopefully keeping the ELSP full with active
+	 * contexts, despite the best efforts of preempt-to-busy to confuse
+	 * us.
+	 */
+	if (i915_request_completed(next))
+		return true;
+
 	if (!can_merge_ctx(prev->hw_context, next->hw_context))
 		return false;
@@ -1181,21 +1192,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			continue;
 		}
 
-		if (i915_request_completed(rq)) {
-			ve->request = NULL;
-			ve->base.execlists.queue_priority_hint = INT_MIN;
-			rb_erase_cached(rb, &execlists->virtual);
-			RB_CLEAR_NODE(rb);
-
-			rq->engine = engine;
-			__i915_request_submit(rq);
-
-			spin_unlock(&ve->base.active.lock);
-
-			rb = rb_first_cached(&execlists->virtual);
-			continue;
-		}
-
 		if (last && !can_merge_rq(last, rq)) {
 			spin_unlock(&ve->base.active.lock);
 			return; /* leave this for another */
@@ -1249,11 +1245,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				GEM_BUG_ON(ve->siblings[0] != engine);
 			}
 
-			__i915_request_submit(rq);
-			if (!i915_request_completed(rq)) {
+			if (__i915_request_submit(rq)) {
 				submit = true;
 				last = rq;
 			}
+
+			/*
+			 * Hmm, we have a bunch of virtual engine requests,
+			 * but the first one was already completed (thanks
+			 * preempt-to-busy!). Keep looking at the veng queue
+			 * until we have no more relevant requests (i.e.
+			 * the normal submit queue has higher priority).
+			 */
+			if (!submit) {
+				spin_unlock(&ve->base.active.lock);
+				rb = rb_first_cached(&execlists->virtual);
+				continue;
+			}
 		}
 
 		spin_unlock(&ve->base.active.lock);
@@ -1266,8 +1274,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			int i;
 
 			priolist_for_each_request_consume(rq, rn, p, i) {
-				if (i915_request_completed(rq))
-					goto skip;
+				bool merge = true;
 
 				/*
 				 * Can we combine this request with the current port?
@@ -1308,14 +1315,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				    ctx_single_port_submission(rq->hw_context))
 					goto done;
 
-				*port = execlists_schedule_in(last, port - execlists->pending);
-				port++;
+				merge = false;
 			}
 
-			last = rq;
-			submit = true;
-skip:
-			__i915_request_submit(rq);
+			if (__i915_request_submit(rq)) {
+				if (!merge) {
+					*port = execlists_schedule_in(last, port - execlists->pending);
+					port++;
+					last = NULL;
+				}
+
+				GEM_BUG_ON(last &&
+					   !can_merge_ctx(last->hw_context,
+							  rq->hw_context));
+
+				submit = true;
+				last = rq;
+			}
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
...
@@ -377,9 +377,10 @@ __i915_request_await_execution(struct i915_request *rq,
 	return 0;
 }
 
-void __i915_request_submit(struct i915_request *request)
+bool __i915_request_submit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
+	bool result = false;
 
 	GEM_TRACE("%s fence %llx:%lld, current %d\n",
 		  engine->name,
@@ -389,6 +390,25 @@ void __i915_request_submit(struct i915_request *request)
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->active.lock);
 
+	/*
+	 * With the advent of preempt-to-busy, we frequently encounter
+	 * requests that we have unsubmitted from HW, but left running
+	 * until the next ack and so have completed in the meantime. On
+	 * resubmission of that completed request, we can skip
+	 * updating the payload, and execlists can even skip submitting
+	 * the request.
+	 *
+	 * We must remove the request from the caller's priority queue,
+	 * and the caller must only call us when the request is in their
+	 * priority queue, under the active.lock. This ensures that the
+	 * request has *not* yet been retired and we can safely move
+	 * the request into the engine->active.list where it will be
+	 * dropped upon retiring. (Otherwise if we resubmit a *retired*
+	 * request, this would be a horrible use-after-free.)
+	 */
+	if (i915_request_completed(request))
+		goto xfer;
+
 	if (i915_gem_context_is_banned(request->gem_context))
 		i915_request_skip(request, -EIO);
@@ -412,13 +432,18 @@ void __i915_request_submit(struct i915_request *request)
 	    i915_sw_fence_signaled(&request->semaphore))
 		engine->saturated |= request->sched.semaphores;
 
-	/* We may be recursing from the signal callback of another i915 fence */
-	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+	engine->emit_fini_breadcrumb(request,
+				     request->ring->vaddr + request->postfix);
 
-	list_move_tail(&request->sched.link, &engine->active.requests);
+	trace_i915_request_execute(request);
+	engine->serial++;
+	result = true;
 
-	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
-	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
+xfer:	/* We may be recursing from the signal callback of another i915 fence */
+	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+
+	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
+		list_move_tail(&request->sched.link, &engine->active.requests);
 
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
@@ -429,12 +454,7 @@ void __i915_request_submit(struct i915_request *request)
 
 	spin_unlock(&request->lock);
 
-	engine->emit_fini_breadcrumb(request,
-				     request->ring->vaddr + request->postfix);
-
-	engine->serial++;
-
-	trace_i915_request_execute(request);
+	return result;
 }
 
 void i915_request_submit(struct i915_request *request)
...
@@ -292,7 +292,7 @@ int i915_request_await_execution(struct i915_request *rq,
 
 void i915_request_add(struct i915_request *rq);
 
-void __i915_request_submit(struct i915_request *request);
+bool __i915_request_submit(struct i915_request *request);
 void i915_request_submit(struct i915_request *request);
 
 void i915_request_skip(struct i915_request *request, int error);
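
A complementary caller-side sketch (again with hypothetical names, not the real execlists_dequeue()) of how a dequeue loop consumes the new boolean: only a request that was actually emitted becomes "last", the merge target, while completed requests are drained from the queue without forcing an ELSP write.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical request with only a completion flag. */
struct sketch_request {
	bool completed;
};

/* Mirrors the new contract: true only when the request was emitted. */
static bool sketch_submit(struct sketch_request *rq)
{
	return !rq->completed;
}

static void sketch_dequeue(struct sketch_request *queue, size_t n)
{
	struct sketch_request *last = NULL;
	bool submit = false;
	size_t i;

	for (i = 0; i < n; i++) {
		if (sketch_submit(&queue[i])) {
			submit = true;
			last = &queue[i];	/* live: merge target */
		}
		/* completed: moved to the active list, nothing to track */
	}

	if (submit)
		printf("would write ELSP, last=%zu\n",
		       (size_t)(last - queue));
	else
		printf("all requests were already completed\n");
}

int main(void)
{
	struct sketch_request q[] = {
		{ .completed = true },	/* drained, never becomes last */
		{ .completed = false },
		{ .completed = false },
	};

	sketch_dequeue(q, sizeof(q) / sizeof(q[0]));	/* last=2 */
	return 0;
}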