Commit 9e2750fc authored by Chris Wilson's avatar Chris Wilson Committed by Jani Nikula

drm/i915: Keep track of request among the scheduling lists

If we keep track of when the i915_request.sched.link is on the HW
runlist, or in the priority queue we can simplify our interactions with
the request (such as during rescheduling). This also simplifies the next
patch where we introduce a new in-between list, for requests that are
ready but neither on the run list nor in the queue.

v2: Update i915_sched_node.link explanation for current usage where it
is a link on both the queue and on the runlists.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200116184754.2860848-1-chris@chris-wilson.co.uk
(cherry picked from commit 672c368f)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent cc3251d8
...@@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) ...@@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
list_move(&rq->sched.link, pl); list_move(&rq->sched.link, pl);
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
active = rq; active = rq;
} else { } else {
struct intel_engine_cs *owner = rq->context->engine; struct intel_engine_cs *owner = rq->context->engine;
...@@ -2431,11 +2433,12 @@ static void execlists_preempt(struct timer_list *timer) ...@@ -2431,11 +2433,12 @@ static void execlists_preempt(struct timer_list *timer)
} }
static void queue_request(struct intel_engine_cs *engine, static void queue_request(struct intel_engine_cs *engine,
struct i915_sched_node *node, struct i915_request *rq)
int prio)
{ {
GEM_BUG_ON(!list_empty(&node->link)); GEM_BUG_ON(!list_empty(&rq->sched.link));
list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio)); list_add_tail(&rq->sched.link,
i915_sched_lookup_priolist(engine, rq_prio(rq)));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
} }
static void __submit_queue_imm(struct intel_engine_cs *engine) static void __submit_queue_imm(struct intel_engine_cs *engine)
...@@ -2471,7 +2474,7 @@ static void execlists_submit_request(struct i915_request *request) ...@@ -2471,7 +2474,7 @@ static void execlists_submit_request(struct i915_request *request)
/* Will be called from irq-context when using foreign fences. */ /* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->active.lock, flags); spin_lock_irqsave(&engine->active.lock, flags);
queue_request(engine, &request->sched, rq_prio(request)); queue_request(engine, request);
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
GEM_BUG_ON(list_empty(&request->sched.link)); GEM_BUG_ON(list_empty(&request->sched.link));
......
...@@ -408,8 +408,10 @@ bool __i915_request_submit(struct i915_request *request) ...@@ -408,8 +408,10 @@ bool __i915_request_submit(struct i915_request *request)
xfer: /* We may be recursing from the signal callback of another i915 fence */ xfer: /* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
list_move_tail(&request->sched.link, &engine->active.requests); list_move_tail(&request->sched.link, &engine->active.requests);
clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
}
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) && if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
......
...@@ -70,6 +70,18 @@ enum { ...@@ -70,6 +70,18 @@ enum {
*/ */
I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS, I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
/*
* I915_FENCE_FLAG_PQUEUE - this request is ready for execution
*
* Using the scheduler, when a request is ready for execution it is put
* into the priority queue, and removed from that queue when transferred
* to the HW runlists. We want to track its membership within the
* priority queue so that we can easily check before rescheduling.
*
* See i915_request_in_priority_queue()
*/
I915_FENCE_FLAG_PQUEUE,
/* /*
* I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
* *
...@@ -361,6 +373,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq) ...@@ -361,6 +373,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
} }
/*
 * i915_request_in_priority_queue - is this request sitting in the priolist?
 * @rq: the request to query
 *
 * Returns true if I915_FENCE_FLAG_PQUEUE is set, i.e. the request's
 * sched.link is currently on the engine's priority queue (set in
 * queue_request(), cleared when the request is transferred to the HW
 * runlist in __i915_request_submit()).
 */
static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
/** /**
* Returns true if seq1 is later than seq2. * Returns true if seq1 is later than seq2.
*/ */
......
...@@ -326,20 +326,18 @@ static void __i915_schedule(struct i915_sched_node *node, ...@@ -326,20 +326,18 @@ static void __i915_schedule(struct i915_sched_node *node,
node->attr.priority = prio; node->attr.priority = prio;
if (list_empty(&node->link)) {
/* /*
* If the request is not in the priolist queue because * Once the request is ready, it will be placed into the
* it is not yet runnable, then it doesn't contribute * priority lists and then onto the HW runlist. Before the
* to our preemption decisions. On the other hand, * request is ready, it does not contribute to our preemption
* if the request is on the HW, it too is not in the * decisions and we can safely ignore it, as it will, and
* queue; but in that case we may still need to reorder * any preemption required, be dealt with upon submission.
* the inflight requests. * See engine->submit_request()
*/ */
if (list_empty(&node->link))
continue; continue;
}
if (!intel_engine_is_virtual(engine) && if (i915_request_in_priority_queue(node_to_request(node))) {
!i915_request_is_active(node_to_request(node))) {
if (!cache.priolist) if (!cache.priolist)
cache.priolist = cache.priolist =
i915_sched_lookup_priolist(engine, i915_sched_lookup_priolist(engine,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment