Commit 7a62cc61 authored by Mika Kuoppala, committed by Mika Kuoppala

drm/i915: Add execlist_port_complete

When the first execlist entry is processed, we move the port (contents).
Introduce a function for this, as execlists and guc use this common
operation.

v2: rebase. s/GEM_DEBUG_BUG/GEM_BUG (Chris)
v3: rebase
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20170922124307.10914-4-mika.kuoppala@intel.com
parent cf4591d1
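In short: the ELSP has two submission ports; when the request in port[0] retires, the contents of port[1] shift down into port[0] and port[1] is cleared. Below is a minimal standalone C model of that shift, with simplified stand-in types rather than the kernel's definitions (the real helper, execlists_port_complete(), appears in the intel_ringbuffer.h hunk at the end of this diff):

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct execlist_port: just a request name. */
struct port { const char *request; };

/* Mirrors the new helper: move port[1] into port[0], then clear port[1]. */
static void port_complete(struct port *ports)
{
        ports[0] = ports[1];
        memset(&ports[1], 0, sizeof(ports[1]));
}

int main(void)
{
        struct port ports[2] = { { "rq-A" }, { "rq-B" } };

        /* rq-A completes on the hardware: rq-B becomes the head request. */
        port_complete(ports);
        assert(strcmp(ports[0].request, "rq-B") == 0);
        assert(ports[1].request == NULL);
        printf("head is now %s\n", ports[0].request);
        return 0;
}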
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -592,7 +592,7 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
 			rq->priotree.priority = INT_MAX;
 
 			__i915_gem_request_submit(rq);
-			trace_i915_gem_request_in(rq, port_index(port, engine));
+			trace_i915_gem_request_in(rq, port_index(port, execlists));
 			last = rq;
 			submit = true;
 		}
@@ -615,7 +615,8 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
 static void i915_guc_irq_handler(unsigned long data)
 {
 	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
-	struct execlist_port *port = engine->execlists.port;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct execlist_port *port = execlists->port;
 	struct drm_i915_gem_request *rq;
 
 	rq = port_request(&port[0]);
@@ -623,8 +624,7 @@ static void i915_guc_irq_handler(unsigned long data)
 		trace_i915_gem_request_out(rq);
 		i915_gem_request_put(rq);
 
-		port[0] = port[1];
-		memset(&port[1], 0, sizeof(port[1]));
+		execlists_port_complete(execlists, port);
 
 		rq = port_request(&port[0]);
 	}
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -454,7 +454,8 @@ static void port_assign(struct execlist_port *port,
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *last;
-	struct execlist_port *port = engine->execlists.port;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct execlist_port *port = execlists->port;
 	struct rb_node *rb;
 	bool submit = false;
 
@@ -468,8 +469,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 */
 		last->tail = last->wa_tail;
 
-	GEM_BUG_ON(port_isset(&port[1]));
-
 	/* Hardware submission is through 2 ports. Conceptually each port
 	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
 	 * static for a context, and unique to each, so we only execute
@@ -492,8 +491,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 */
 
 	spin_lock_irq(&engine->timeline->lock);
-	rb = engine->execlists.first;
-	GEM_BUG_ON(rb_first(&engine->execlists.queue) != rb);
+	rb = execlists->first;
+	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
 	while (rb) {
 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
 		struct drm_i915_gem_request *rq, *rn;
@@ -516,7 +515,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * combine this request with the last, then we
			 * are done.
 			 */
-			if (port != engine->execlists.port) {
+			if (port != execlists->port) {
 				__list_del_many(&p->requests,
 						&rq->priotree.link);
 				goto done;
@@ -541,25 +540,27 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				if (submit)
 					port_assign(port, last);
 				port++;
+
+				GEM_BUG_ON(port_isset(port));
 			}
 
 			INIT_LIST_HEAD(&rq->priotree.link);
 			rq->priotree.priority = INT_MAX;
 
 			__i915_gem_request_submit(rq);
-			trace_i915_gem_request_in(rq, port_index(port, engine));
+			trace_i915_gem_request_in(rq, port_index(port, execlists));
 			last = rq;
 			submit = true;
 		}
 
 		rb = rb_next(rb);
-		rb_erase(&p->node, &engine->execlists.queue);
+		rb_erase(&p->node, &execlists->queue);
 		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
 done:
-	engine->execlists.first = rb;
+	execlists->first = rb;
 	if (submit)
 		port_assign(port, last);
 	spin_unlock_irq(&engine->timeline->lock);
@@ -748,8 +749,7 @@ static void intel_lrc_irq_handler(unsigned long data)
 			trace_i915_gem_request_out(rq);
 			i915_gem_request_put(rq);
 
-			port[0] = port[1];
-			memset(&port[1], 0, sizeof(port[1]));
+			execlists_port_complete(execlists, port);
 		} else {
 			port_set(port, port_pack(rq, count));
 		}
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -228,7 +228,7 @@ struct intel_engine_execlists {
 #define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
 #define port_set(p, packed) ((p)->request_count = (packed))
 #define port_isset(p) ((p)->request_count)
-#define port_index(p, e) ((p) - (e)->execlists.port)
+#define port_index(p, execlists) ((p) - (execlists)->port)
 
 		/**
 		 * @context_id: context ID for port
@@ -511,6 +511,18 @@ struct intel_engine_cs {
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
 
+static inline void
+execlists_port_complete(struct intel_engine_execlists * const execlists,
+			struct execlist_port * const port)
+{
+	struct execlist_port * const port1 = &execlists->port[1];
+
+	GEM_BUG_ON(port_index(port, execlists) != 0);
+
+	*port = *port1;
+	memset(port1, 0, sizeof(struct execlist_port));
+}
+
 static inline unsigned int
 intel_engine_flag(const struct intel_engine_cs *engine)
 {