Commit f747026c authored by Chris Wilson

drm/i915: Only run execlist context-switch handler after an interrupt

Mark when we run the execlist tasklet following the interrupt, so we
don't probe a potentially uninitialised register when submitting the
contexts multiple times before the hardware responds.

v2: Use a shared engine->irq_posted
v3: Always use locked bitops to be sure of atomicity wrt to other bits
in the mask.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170124152021.26587-1-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
parent 538b257d
...@@ -1349,8 +1349,11 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) ...@@ -1349,8 +1349,11 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{ {
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
notify_ring(engine); notify_ring(engine);
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
tasklet_schedule(&engine->irq_tasklet); if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
tasklet_hi_schedule(&engine->irq_tasklet);
}
} }
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
......
...@@ -564,7 +564,7 @@ static void intel_lrc_irq_handler(unsigned long data) ...@@ -564,7 +564,7 @@ static void intel_lrc_irq_handler(unsigned long data)
intel_uncore_forcewake_get(dev_priv, engine->fw_domains); intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
if (!execlists_elsp_idle(engine)) { while (test_and_clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
u32 __iomem *csb_mmio = u32 __iomem *csb_mmio =
dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
u32 __iomem *buf = u32 __iomem *buf =
...@@ -1297,6 +1297,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) ...@@ -1297,6 +1297,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
/* After a GPU reset, we may have requests to replay */ /* After a GPU reset, we may have requests to replay */
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
if (!execlists_elsp_idle(engine)) { if (!execlists_elsp_idle(engine)) {
engine->execlist_port[0].count = 0; engine->execlist_port[0].count = 0;
engine->execlist_port[1].count = 0; engine->execlist_port[1].count = 0;
......
...@@ -213,6 +213,7 @@ struct intel_engine_cs { ...@@ -213,6 +213,7 @@ struct intel_engine_cs {
unsigned long irq_posted; unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0 #define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1
/* Rather than have every client wait upon all user interrupts, /* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the * with the herd waking after every interrupt and each doing the
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment