Commit c36eebd9 authored by Chris Wilson

drm/i915/gt: execlists->active is serialised by the tasklet

The active/pending execlists is no longer protected by the
engine->active.lock, but is serialised by the tasklet instead. Update
the locking around the debug and stats to follow suit.

v2: local_bh_disable() to prevent recursing into the tasklet in case we
trigger a softirq (Tvrtko)

Fixes: df403069 ("drm/i915/execlists: Lift process_csb() out of the irq-off spinlock")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191009160906.16195-1-chris@chris-wilson.co.uk
parent c949ae43
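For illustration only, here is a minimal sketch (not part of this patch) of how a consumer of execlists->active is expected to serialise against the submission tasklet after this change. show_active_requests() is a hypothetical helper; the locking pattern simply mirrors the intel_engine_print_registers() hunk below.

static void show_active_requests(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	struct i915_request * const *port;
	struct i915_request *rq;

	/* Disables local softirqs, then spins on tasklet_trylock() */
	execlists_active_lock_bh(execlists);
	for (port = execlists->active; (rq = *port); port++)
		pr_info("active request %llx:%lld\n",
			rq->fence.context, rq->fence.seqno);
	/* Drops the tasklet lock, then re-enables softirqs (kicking ksoftirqd) */
	execlists_active_unlock_bh(execlists);
}

Taking the lock with bottom halves disabled prevents the submission tasklet from running, and recursing on the lock, on this CPU while the ELSP ports are walked, which is the v2 concern noted above.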
@@ -136,6 +136,20 @@ execlists_active(const struct intel_engine_execlists *execlists)
 	return READ_ONCE(*execlists->active);
 }
 
+static inline void
+execlists_active_lock_bh(struct intel_engine_execlists *execlists)
+{
+	local_bh_disable(); /* prevent local softirq and lock recursion */
+	tasklet_lock(&execlists->tasklet);
+}
+
+static inline void
+execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
+{
+	tasklet_unlock(&execlists->tasklet);
+	local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
+}
+
 struct i915_request *
 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
@@ -1245,9 +1245,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 					 struct drm_printer *m)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	const struct intel_engine_execlists * const execlists =
-		&engine->execlists;
-	unsigned long flags;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
 	u64 addr;
 
 	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
@@ -1329,7 +1327,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 				   idx, hws[idx * 2], hws[idx * 2 + 1]);
 		}
 
-		spin_lock_irqsave(&engine->active.lock, flags);
+		execlists_active_lock_bh(execlists);
 		for (port = execlists->active; (rq = *port); port++) {
 			char hdr[80];
 			int len;
@@ -1367,7 +1365,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 			if (tl)
 				intel_timeline_put(tl);
 		}
-		spin_unlock_irqrestore(&engine->active.lock, flags);
+		execlists_active_unlock_bh(execlists);
 	} else if (INTEL_GEN(dev_priv) > 6) {
 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
 			   ENGINE_READ(engine, RING_PP_DIR_BASE));
@@ -1509,8 +1507,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	if (!intel_engine_supports_stats(engine))
 		return -ENODEV;
 
-	spin_lock_irqsave(&engine->active.lock, flags);
-	write_seqlock(&engine->stats.lock);
+	execlists_active_lock_bh(execlists);
+	write_seqlock_irqsave(&engine->stats.lock, flags);
 
 	if (unlikely(engine->stats.enabled == ~0)) {
 		err = -EBUSY;
@@ -1538,8 +1536,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	}
 
 unlock:
-	write_sequnlock(&engine->stats.lock);
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	write_sequnlock_irqrestore(&engine->stats.lock, flags);
+	execlists_active_unlock_bh(execlists);
 
 	return err;
 }
@@ -77,6 +77,12 @@ struct drm_i915_private;
 
 #define I915_GEM_IDLE_TIMEOUT (HZ / 5)
 
+static inline void tasklet_lock(struct tasklet_struct *t)
+{
+	while (!tasklet_trylock(t))
+		cpu_relax();
+}
+
 static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
 {
 	if (!atomic_fetch_inc(&t->count))