Commit d0aa694b authored by Chris Wilson

drm/i915/pmu: Always sample an active ringbuffer

As we no longer have a precise indication of requests queued to an
engine, make no presumptions and just sample the ring registers to see
if the engine is busy.

v2: Report busy while the ring is idling on a semaphore/event.
v3: Give the struct a name!
v4: Always 0 outside the powerwell; trusting the powerwell is
accurate enough for our sampling pmu.
v5: Protect against gen7 mmio madness and try to improve grammar
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190223000102.14290-1-chris@chris-wilson.co.uk
parent 0a3317d4
...@@ -148,14 +148,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915) ...@@ -148,14 +148,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
spin_unlock_irq(&i915->pmu.lock); spin_unlock_irq(&i915->pmu.lock);
} }
/*
 * grab_forcewake - ensure the global forcewake reference is held
 * @i915: device private
 * @fw: whether the caller already holds forcewake (true = already held)
 *
 * Takes a FORCEWAKE_ALL reference only on the first call (@fw == false);
 * always returns true so the caller can update its local "fw" tracking
 * flag with the return value and release the reference once when done.
 *
 * NOTE(review): removed by this patch — the new engines_sample() samples
 * ring registers under the uncore lock instead of taking forcewake lazily.
 */
static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
{
/* only acquire on the first request; subsequent calls are no-ops */
if (!fw)
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
return true;
}
static void static void
add_sample(struct i915_pmu_sample *sample, u32 val) add_sample(struct i915_pmu_sample *sample, u32 val)
{ {
...@@ -168,49 +160,48 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) ...@@ -168,49 +160,48 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
intel_wakeref_t wakeref; intel_wakeref_t wakeref;
bool fw = false; unsigned long flags;
if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0) if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return; return;
if (!dev_priv->gt.awake) wakeref = 0;
return; if (READ_ONCE(dev_priv->gt.awake))
wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
if (!wakeref) if (!wakeref)
return; return;
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
for_each_engine(engine, dev_priv, id) { for_each_engine(engine, dev_priv, id) {
u32 current_seqno = intel_engine_get_seqno(engine); struct intel_engine_pmu *pmu = &engine->pmu;
u32 last_seqno = intel_engine_last_submit(engine); bool busy;
u32 val; u32 val;
val = !i915_seqno_passed(current_seqno, last_seqno); val = I915_READ_FW(RING_CTL(engine->mmio_base));
if (val == 0) /* powerwell off => engine idle */
if (val) continue;
add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
period_ns);
if (val && (engine->pmu.enable &
(BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
fw = grab_forcewake(dev_priv, fw);
val = I915_READ_FW(RING_CTL(engine->mmio_base));
} else {
val = 0;
}
if (val & RING_WAIT) if (val & RING_WAIT)
add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT], add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
period_ns);
if (val & RING_WAIT_SEMAPHORE) if (val & RING_WAIT_SEMAPHORE)
add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA], add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
period_ns);
}
if (fw) /*
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); * While waiting on a semaphore or event, MI_MODE reports the
* ring as idle. However, previously using the seqno, and with
* execlists sampling, we account for the ring waiting as the
* engine being busy. Therefore, we record the sample as being
* busy if either waiting or !idle.
*/
busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
if (!busy) {
val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
busy = !(val & MODE_IDLE);
}
if (busy)
add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
intel_runtime_pm_put(dev_priv, wakeref); intel_runtime_pm_put(dev_priv, wakeref);
} }
......
...@@ -392,7 +392,7 @@ struct intel_engine_cs { ...@@ -392,7 +392,7 @@ struct intel_engine_cs {
bool irq_armed; bool irq_armed;
} breadcrumbs; } breadcrumbs;
struct { struct intel_engine_pmu {
/** /**
* @enable: Bitmask of enable sample events on this engine. * @enable: Bitmask of enable sample events on this engine.
* *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment