Commit 5af05fef authored by Michel Thierry, committed by Daniel Vetter

drm/i915/lrc: Prevent preemption when lite-restore is disabled

When WaEnableForceRestoreInCtxtDescForVCS is required, it is only
safe to send new contexts if the last reported event is "active to
idle". Otherwise the same context can fully preempt itself because
lite-restore is disabled.
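In short, the fix makes the execlists interrupt handler hold back queued submissions on the affected engines until the hardware reports an "active to idle" context switch. A minimal sketch of that gate, assuming csb_status holds the last context-status event read from the status buffer (the parameter names here are illustrative; disable_lite_restore_wa, execlists_context_unqueue and the GEN8_CTX_STATUS_ACTIVE_IDLE bit appear in the diff below):

	/* Sketch only: gate unqueueing when lite-restore is disabled. */
	static void gate_submission(struct intel_engine_cs *ring,
				    u32 csb_status, u32 submit_contexts)
	{
		if (disable_lite_restore_wa(ring)) {
			/*
			 * Only submit after an "active to idle" event;
			 * otherwise the running context would fully
			 * preempt itself.
			 */
			if ((csb_status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
			    submit_contexts != 0)
				execlists_context_unqueue(ring);
		} else if (submit_contexts != 0) {
			execlists_context_unqueue(ring);
		}
	}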

Testcase: igt/gem_concurrent_blit
Reported-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Arun Siluvery <arun.siluvery@linux.intel.com>
Tested-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent ec72d588
@@ -277,10 +277,18 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
 	return lrca >> 12;
 }
 
+static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
+		(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
+	       (ring->id == VCS || ring->id == VCS2);
+}
+
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 				     struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
 	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 	uint64_t desc;
 	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
@@ -302,9 +310,7 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
 	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
-	if (((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
-	     (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
-	    (ring->id == VCS || ring->id == VCS2))
+	if (disable_lite_restore_wa(ring))
 		desc |= GEN8_CTX_FORCE_RESTORE;
 
 	return desc;
@@ -495,7 +501,7 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 	u32 status_pointer;
 	u8 read_pointer;
 	u8 write_pointer;
-	u32 status;
+	u32 status = 0;
 	u32 status_id;
 	u32 submit_contexts = 0;
@@ -533,8 +539,14 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 		}
 	}
 
-	if (submit_contexts != 0)
+	if (disable_lite_restore_wa(ring)) {
+		/* Prevent a ctx from preempting itself */
+		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
+		    (submit_contexts != 0))
+			execlists_context_unqueue(ring);
+	} else if (submit_contexts != 0) {
 		execlists_context_unqueue(ring);
+	}
 
 	spin_unlock(&ring->execlist_lock);
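One non-obvious detail in the hunks above: status is now zero-initialized in intel_lrc_irq_handler. The handler reads context-status events in a loop that may legitimately consume zero entries, and the new workaround path tests status after that loop; without the initializer it would examine uninitialized stack data. A rough sketch of the shape, with illustrative variable and helper names rather than the exact kernel code:

	u32 status = 0;	/* stays 0 if no CSB entries are pending */

	while (read_pointer != write_pointer) {
		status = read_csb_entry(ring, ++read_pointer);
		/* ... handle the individual status events ... */
	}

	/* 'status' is the last event seen, or 0 if none arrived */
	if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
		/* safe to submit the next context */;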