Commit 8ea397fa authored by Chris Wilson

drm/i915/execlists: Process one CSB update at a time

In the next patch, we will process the CSB events directly from the
submission path, rather than only after a CS interrupt. Hence, we will
no longer have the need for a loop until the has-interrupt bit is clear,
and in the meantime can remove that small optimisation.
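
For orientation, the shape of the change is roughly the following (a simplified sketch of process_csb(), not the full driver code; the real function in the diff below also handles the MMIO fallback, forcewake and tracing):

	/* Before: keep draining for as long as the irq-posted bit is re-set. */
	do {
		clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
		smp_mb__after_atomic();
		/* ... read the CSB head/tail and process the entries ... */
	} while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted));

	/* After: one pass per invocation; events that arrive later are picked
	 * up by the next tasklet run (or, in the next patch, by the
	 * submission path itself).
	 */
	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
	smp_mb__after_atomic();
	/* ... read the CSB head/tail once and process the entries ... */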

v2: Tvrtko pointed out it was safer to unconditionally kick the tasklet
after each irq, when assuming that the tasklet is called for each irq.
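
Concretely, the v2 interrupt-handler change this refers to is (paraphrasing the first hunk below, with an explanatory comment added):

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
		if (READ_ONCE(engine->execlists.active)) {
			/* v1 used tasklet = !test_and_set_bit(...), i.e. only
			 * kicked the tasklet when the bit was previously clear.
			 * With process_csb() no longer looping on the bit, kick
			 * unconditionally so no event can be left unprocessed.
			 */
			set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
			tasklet = true;
		}
	}
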
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180628201211.13837-4-chris@chris-wilson.co.uk
parent d8857d54
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1497,9 +1497,10 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 	bool tasklet = false;
 
 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
-		if (READ_ONCE(engine->execlists.active))
-			tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
-						    &engine->irq_posted);
+		if (READ_ONCE(engine->execlists.active)) {
+			set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+			tasklet = true;
+		}
 	}
 
 	if (iir & GT_RENDER_USER_INTERRUPT) {
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -955,166 +955,162 @@ static void process_csb(struct intel_engine_cs *engine)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct execlist_port *port = execlists->port;
 	struct drm_i915_private *i915 = engine->i915;
+
+	/* The HWSP contains a (cacheable) mirror of the CSB */
+	const u32 *buf =
+		&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+	unsigned int head, tail;
 	bool fw = false;
 
-	do {
-		/* The HWSP contains a (cacheable) mirror of the CSB */
-		const u32 *buf =
-			&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
-		unsigned int head, tail;
-
-		/* Clear before reading to catch new interrupts */
-		clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-		smp_mb__after_atomic();
-
-		if (unlikely(execlists->csb_use_mmio)) {
-			if (!fw) {
-				intel_uncore_forcewake_get(i915, execlists->fw_domains);
-				fw = true;
-			}
-
-			buf = (u32 * __force)
-				(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
-
-			head = readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
-			tail = GEN8_CSB_WRITE_PTR(head);
-			head = GEN8_CSB_READ_PTR(head);
-			execlists->csb_head = head;
-		} else {
-			const int write_idx =
-				intel_hws_csb_write_index(i915) -
-				I915_HWS_CSB_BUF0_INDEX;
-
-			head = execlists->csb_head;
-			tail = READ_ONCE(buf[write_idx]);
-			rmb(); /* Hopefully paired with a wmb() in HW */
-		}
-		GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
-			  engine->name,
-			  head, GEN8_CSB_READ_PTR(readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
-			  tail, GEN8_CSB_WRITE_PTR(readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
-
-		while (head != tail) {
-			struct i915_request *rq;
-			unsigned int status;
-			unsigned int count;
-
-			if (++head == GEN8_CSB_ENTRIES)
-				head = 0;
-
-			/*
-			 * We are flying near dragons again.
-			 *
-			 * We hold a reference to the request in execlist_port[]
-			 * but no more than that. We are operating in softirq
-			 * context and so cannot hold any mutex or sleep. That
-			 * prevents us stopping the requests we are processing
-			 * in port[] from being retired simultaneously (the
-			 * breadcrumb will be complete before we see the
-			 * context-switch). As we only hold the reference to the
-			 * request, any pointer chasing underneath the request
-			 * is subject to a potential use-after-free. Thus we
-			 * store all of the bookkeeping within port[] as
-			 * required, and avoid using unguarded pointers beneath
-			 * request itself. The same applies to the atomic
-			 * status notifier.
-			 */
-
-			status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
-			GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
-				  engine->name, head,
-				  status, buf[2*head + 1],
-				  execlists->active);
-
-			if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
-				      GEN8_CTX_STATUS_PREEMPTED))
-				execlists_set_active(execlists,
-						     EXECLISTS_ACTIVE_HWACK);
-			if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
-				execlists_clear_active(execlists,
-						       EXECLISTS_ACTIVE_HWACK);
-
-			if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
-				continue;
-
-			/* We should never get a COMPLETED | IDLE_ACTIVE! */
-			GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
-
-			if (status & GEN8_CTX_STATUS_COMPLETE &&
-			    buf[2*head + 1] == execlists->preempt_complete_status) {
-				GEM_TRACE("%s preempt-idle\n", engine->name);
-				complete_preempt_context(execlists);
-				continue;
-			}
-
-			if (status & GEN8_CTX_STATUS_PREEMPTED &&
-			    execlists_is_active(execlists,
-						EXECLISTS_ACTIVE_PREEMPT))
-				continue;
-
-			GEM_BUG_ON(!execlists_is_active(execlists,
-							EXECLISTS_ACTIVE_USER));
-
-			rq = port_unpack(port, &count);
-			GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
-				  engine->name,
-				  port->context_id, count,
-				  rq ? rq->global_seqno : 0,
-				  rq ? rq->fence.context : 0,
-				  rq ? rq->fence.seqno : 0,
-				  intel_engine_get_seqno(engine),
-				  rq ? rq_prio(rq) : 0);
-
-			/* Check the context/desc id for this event matches */
-			GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
-
-			GEM_BUG_ON(count == 0);
-			if (--count == 0) {
-				/*
-				 * On the final event corresponding to the
-				 * submission of this context, we expect either
-				 * an element-switch event or a completion
-				 * event (and on completion, the active-idle
-				 * marker). No more preemptions, lite-restore
-				 * or otherwise.
-				 */
-				GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
-				GEM_BUG_ON(port_isset(&port[1]) &&
-					   !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
-				GEM_BUG_ON(!port_isset(&port[1]) &&
-					   !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
-
-				/*
-				 * We rely on the hardware being strongly
-				 * ordered, that the breadcrumb write is
-				 * coherent (visible from the CPU) before the
-				 * user interrupt and CSB is processed.
-				 */
-				GEM_BUG_ON(!i915_request_completed(rq));
-
-				execlists_context_schedule_out(rq,
-							       INTEL_CONTEXT_SCHEDULE_OUT);
-				i915_request_put(rq);
-
-				GEM_TRACE("%s completed ctx=%d\n",
-					  engine->name, port->context_id);
-
-				port = execlists_port_complete(execlists, port);
-				if (port_isset(port))
-					execlists_user_begin(execlists, port);
-				else
-					execlists_user_end(execlists);
-			} else {
-				port_set(port, port_pack(rq, count));
-			}
-		}
-
-		if (head != execlists->csb_head) {
-			execlists->csb_head = head;
-			writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
-			       i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
-		}
-	} while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted));
+	/* Clear before reading to catch new interrupts */
+	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+	smp_mb__after_atomic();
+
+	if (unlikely(execlists->csb_use_mmio)) {
+		intel_uncore_forcewake_get(i915, execlists->fw_domains);
+		fw = true;
+
+		buf = (u32 * __force)
+			(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+
+		head = readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
+		tail = GEN8_CSB_WRITE_PTR(head);
+		head = GEN8_CSB_READ_PTR(head);
+		execlists->csb_head = head;
+	} else {
+		const int write_idx =
+			intel_hws_csb_write_index(i915) -
+			I915_HWS_CSB_BUF0_INDEX;
+
+		head = execlists->csb_head;
+		tail = READ_ONCE(buf[write_idx]);
+		rmb(); /* Hopefully paired with a wmb() in HW */
+	}
+	GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
+		  engine->name,
+		  head, GEN8_CSB_READ_PTR(readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
+		  tail, GEN8_CSB_WRITE_PTR(readl(i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
+
+	while (head != tail) {
+		struct i915_request *rq;
+		unsigned int status;
+		unsigned int count;
+
+		if (++head == GEN8_CSB_ENTRIES)
+			head = 0;
+
+		/*
+		 * We are flying near dragons again.
+		 *
+		 * We hold a reference to the request in execlist_port[]
+		 * but no more than that. We are operating in softirq
+		 * context and so cannot hold any mutex or sleep. That
+		 * prevents us stopping the requests we are processing
+		 * in port[] from being retired simultaneously (the
+		 * breadcrumb will be complete before we see the
+		 * context-switch). As we only hold the reference to the
+		 * request, any pointer chasing underneath the request
+		 * is subject to a potential use-after-free. Thus we
+		 * store all of the bookkeeping within port[] as
+		 * required, and avoid using unguarded pointers beneath
+		 * request itself. The same applies to the atomic
+		 * status notifier.
+		 */
+
+		status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
+		GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
+			  engine->name, head,
+			  status, buf[2*head + 1],
+			  execlists->active);
+
+		if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
+			      GEN8_CTX_STATUS_PREEMPTED))
+			execlists_set_active(execlists,
+					     EXECLISTS_ACTIVE_HWACK);
+		if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
+			execlists_clear_active(execlists,
+					       EXECLISTS_ACTIVE_HWACK);
+
+		if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
+			continue;
+
+		/* We should never get a COMPLETED | IDLE_ACTIVE! */
+		GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
+
+		if (status & GEN8_CTX_STATUS_COMPLETE &&
+		    buf[2*head + 1] == execlists->preempt_complete_status) {
+			GEM_TRACE("%s preempt-idle\n", engine->name);
+			complete_preempt_context(execlists);
+			continue;
+		}
+
+		if (status & GEN8_CTX_STATUS_PREEMPTED &&
+		    execlists_is_active(execlists,
+					EXECLISTS_ACTIVE_PREEMPT))
+			continue;
+
+		GEM_BUG_ON(!execlists_is_active(execlists,
+						EXECLISTS_ACTIVE_USER));
+
+		rq = port_unpack(port, &count);
+		GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
+			  engine->name,
+			  port->context_id, count,
+			  rq ? rq->global_seqno : 0,
+			  rq ? rq->fence.context : 0,
+			  rq ? rq->fence.seqno : 0,
+			  intel_engine_get_seqno(engine),
+			  rq ? rq_prio(rq) : 0);
+
+		/* Check the context/desc id for this event matches */
+		GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
+
+		GEM_BUG_ON(count == 0);
+		if (--count == 0) {
+			/*
+			 * On the final event corresponding to the
+			 * submission of this context, we expect either
+			 * an element-switch event or a completion
+			 * event (and on completion, the active-idle
+			 * marker). No more preemptions, lite-restore
+			 * or otherwise.
+			 */
+			GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+			GEM_BUG_ON(port_isset(&port[1]) &&
+				   !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
+			GEM_BUG_ON(!port_isset(&port[1]) &&
+				   !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
+
+			/*
+			 * We rely on the hardware being strongly
+			 * ordered, that the breadcrumb write is
+			 * coherent (visible from the CPU) before the
+			 * user interrupt and CSB is processed.
+			 */
+			GEM_BUG_ON(!i915_request_completed(rq));
+
+			execlists_context_schedule_out(rq,
+						       INTEL_CONTEXT_SCHEDULE_OUT);
+			i915_request_put(rq);
+
+			GEM_TRACE("%s completed ctx=%d\n",
+				  engine->name, port->context_id);
+
+			port = execlists_port_complete(execlists, port);
+			if (port_isset(port))
+				execlists_user_begin(execlists, port);
+			else
+				execlists_user_end(execlists);
+		} else {
+			port_set(port, port_pack(rq, count));
+		}
+	}
+
+	if (head != execlists->csb_head) {
+		execlists->csb_head = head;
+		writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
+		       i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
+	}
 
 	if (unlikely(fw))
 		intel_uncore_forcewake_put(i915, execlists->fw_domains);