Commit 700d6ab9 authored by Dave Airlie


Merge tag 'drm-intel-next-fixes-2020-03-27' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Fixes for instability on Baytrail and Haswell; Ice Lake RPS; Sandy Bridge RC6;
and a few others around GT hangcheck/reset, a livelock, and a NULL dereference.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200327081607.GA3082710@intel.com
parents c0ca5437 2bdd4c28
@@ -14748,8 +14748,8 @@ static int intel_atomic_check(struct drm_device *dev,
 	/* Catch I915_MODE_FLAG_INHERITED */
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (new_crtc_state->hw.mode.private_flags !=
-		    old_crtc_state->hw.mode.private_flags)
+		if (new_crtc_state->uapi.mode.private_flags !=
+		    old_crtc_state->uapi.mode.private_flags)
 			new_crtc_state->uapi.mode_changed = true;
 	}
...
@@ -574,7 +574,7 @@ static void engines_idle_release(struct i915_gem_context *ctx,
 		int err = 0;

 		/* serialises with execbuf */
-		RCU_INIT_POINTER(ce->gem_context, NULL);
+		set_bit(CONTEXT_CLOSED_BIT, &ce->flags);

 		if (!intel_context_pin_if_active(ce))
 			continue;
...
@@ -192,12 +192,16 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
 static inline struct intel_context *
 i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
 {
-	struct intel_context *ce = ERR_PTR(-EINVAL);
+	struct intel_context *ce;

 	rcu_read_lock(); {
 		struct i915_gem_engines *e = rcu_dereference(ctx->engines);
-		if (likely(idx < e->num_engines && e->engines[idx]))
+		if (unlikely(!e)) /* context was closed! */
+			ce = ERR_PTR(-ENOENT);
+		else if (likely(idx < e->num_engines && e->engines[idx]))
 			ce = intel_context_get(e->engines[idx]);
+		else
+			ce = ERR_PTR(-EINVAL);
 	} rcu_read_unlock();

 	return ce;
...
@@ -2316,7 +2316,7 @@ static void eb_request_add(struct i915_execbuffer *eb)
 	prev = __i915_request_commit(rq);

 	/* Check that the context wasn't destroyed before submission */
-	if (likely(rcu_access_pointer(eb->context->gem_context))) {
+	if (likely(!intel_context_is_closed(eb->context))) {
 		attr = eb->gem_context->sched;

 		/*
...
@@ -97,6 +97,8 @@ int __intel_context_do_pin(struct intel_context *ce)
 {
 	int err;

+	GEM_BUG_ON(intel_context_is_closed(ce));
+
 	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
 		err = intel_context_alloc_state(ce);
 		if (err)
...
@@ -173,6 +173,11 @@ static inline bool intel_context_is_barrier(const struct intel_context *ce)
 	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
 }

+static inline bool intel_context_is_closed(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
+}
+
 static inline bool intel_context_use_semaphores(const struct intel_context *ce)
 {
 	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
...
@@ -62,10 +62,11 @@ struct intel_context {
 #define CONTEXT_BARRIER_BIT		0
 #define CONTEXT_ALLOC_BIT		1
 #define CONTEXT_VALID_BIT		2
-#define CONTEXT_USE_SEMAPHORES		3
-#define CONTEXT_BANNED			4
-#define CONTEXT_FORCE_SINGLE_SUBMISSION	5
-#define CONTEXT_NOPREEMPT		6
+#define CONTEXT_CLOSED_BIT		3
+#define CONTEXT_USE_SEMAPHORES		4
+#define CONTEXT_BANNED			5
+#define CONTEXT_FORCE_SINGLE_SUBMISSION	6
+#define CONTEXT_NOPREEMPT		7

 	u32 *lrc_reg_state;
 	u64 lrc_desc;
...
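Taken together, the hunks above replace the old "gem_context pointer is NULLed on close" convention with an explicit CONTEXT_CLOSED_BIT: the bit is set once when the context is closed (engines_idle_release) and tested before pinning or submitting (GEM_BUG_ON in __intel_context_do_pin, the eb_request_add check). A minimal userspace sketch of that pattern, using C11 atomics as simplified stand-ins for the kernel's set_bit()/test_bit(); the toy_* names and types are invented for illustration only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_CONTEXT_CLOSED_BIT 3

struct toy_context {
	atomic_ulong flags;
};

/* analogous to set_bit(CONTEXT_CLOSED_BIT, &ce->flags) on context close */
static void toy_context_close(struct toy_context *ce)
{
	atomic_fetch_or(&ce->flags, 1UL << TOY_CONTEXT_CLOSED_BIT);
}

/* analogous to intel_context_is_closed() */
static bool toy_context_is_closed(struct toy_context *ce)
{
	return atomic_load(&ce->flags) & (1UL << TOY_CONTEXT_CLOSED_BIT);
}

/* analogous to the "refuse to operate on a closed context" checks */
static int toy_context_pin(struct toy_context *ce)
{
	if (toy_context_is_closed(ce))
		return -1;
	return 0;
}

int main(void)
{
	struct toy_context ce = { .flags = 0 };

	printf("pin before close: %d\n", toy_context_pin(&ce)); /* prints 0 */
	toy_context_close(&ce);
	printf("pin after close:  %d\n", toy_context_pin(&ce)); /* prints -1 */
	return 0;
}

The advantage over the pointer-based check is that the flag lives in the intel_context itself, so paths that never touch the GEM context (request submission, reset handling) can test it without RCU.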
@@ -1663,7 +1663,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
 }

 static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
-				     struct intel_engine_cs *engine)
+				     struct i915_request *rq)
 {
 	struct intel_engine_cs *old = ve->siblings[0];

@@ -1671,9 +1671,19 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 	spin_lock(&old->breadcrumbs.irq_lock);
 	if (!list_empty(&ve->context.signal_link)) {
-		list_move_tail(&ve->context.signal_link,
-			       &engine->breadcrumbs.signalers);
-		intel_engine_signal_breadcrumbs(engine);
+		list_del_init(&ve->context.signal_link);
+
+		/*
+		 * We cannot acquire the new engine->breadcrumbs.irq_lock
+		 * (as we are holding a breadcrumbs.irq_lock already),
+		 * so attach this request to the signaler on submission.
+		 * The queued irq_work will occur when we finally drop
+		 * the engine->active.lock after dequeue.
+		 */
+		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
+
+		/* Also transfer the pending irq_work for the old breadcrumb. */
+		intel_engine_signal_breadcrumbs(rq->engine);
 	}
 	spin_unlock(&old->breadcrumbs.irq_lock);
 }

@@ -2045,7 +2055,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
							engine);

				if (!list_empty(&ve->context.signals))
-					virtual_xfer_breadcrumbs(ve, engine);
+					virtual_xfer_breadcrumbs(ve, rq);

				/*
				 * Move the bound engine to the top of the list
...
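The in-line comment in the hunk above explains the fix's shape: while holding the old signaler's irq_lock, the request is only detached and flagged, and the attach to the new engine happens later from a path that can take the new lock safely. A rough, purely illustrative userspace sketch of that "detach now, flag, attach from the submission path" idea, with pthread mutexes and invented toy_* types standing in for the driver's locks and lists:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_engine {
	pthread_mutex_t irq_lock;
	int nr_signalers;		/* stands in for the signalers list */
};

struct toy_request {
	bool enable_signaling;		/* ~DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT */
	struct toy_engine *engine;	/* destination engine */
};

/* Takes only the old engine's lock; never nests the destination's lock. */
static void toy_xfer_breadcrumbs(struct toy_engine *old, struct toy_request *rq)
{
	pthread_mutex_lock(&old->irq_lock);
	old->nr_signalers--;		/* ~list_del_init() from the old list */
	rq->enable_signaling = true;	/* defer the attach to submission */
	pthread_mutex_unlock(&old->irq_lock);
}

/* Submission path: no other irq_lock held, so the new lock is safe here. */
static void toy_submit(struct toy_request *rq)
{
	if (rq->enable_signaling) {
		pthread_mutex_lock(&rq->engine->irq_lock);
		rq->engine->nr_signalers++;	/* attach to new signalers */
		pthread_mutex_unlock(&rq->engine->irq_lock);
	}
}

int main(void)
{
	struct toy_engine old = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct toy_engine dst = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct toy_request rq = { false, &dst };

	toy_xfer_breadcrumbs(&old, &rq);
	toy_submit(&rq);
	printf("old signalers=%d, new signalers=%d\n",
	       old.nr_signalers, dst.nr_signalers);
	return 0;
}

The point of the shape is simply that the two irq_locks are never held at the same time, which is what the original list_move_tail() version required.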
@@ -603,6 +603,7 @@ void intel_rc6_unpark(struct intel_rc6 *rc6)
 void intel_rc6_park(struct intel_rc6 *rc6)
 {
 	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	unsigned int target;

 	if (!rc6->enabled)
 		return;

@@ -617,7 +618,14 @@ void intel_rc6_park(struct intel_rc6 *rc6)
 	/* Turn off the HW timers and go directly to rc6 */
 	set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
-	set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT);
+
+	if (HAS_RC6pp(rc6_to_i915(rc6)))
+		target = 0x6; /* deepest rc6 */
+	else if (HAS_RC6p(rc6_to_i915(rc6)))
+		target = 0x5; /* deep rc6 */
+	else
+		target = 0x4; /* normal rc6 */
+	set(uncore, GEN6_RC_STATE, target << RC_SW_TARGET_STATE_SHIFT);
 }

 void intel_rc6_disable(struct intel_rc6 *rc6)
...
@@ -88,6 +88,11 @@ static bool mark_guilty(struct i915_request *rq)
 	bool banned;
 	int i;

+	if (intel_context_is_closed(rq->context)) {
+		intel_context_set_banned(rq->context);
+		return true;
+	}
+
 	rcu_read_lock();
 	ctx = rcu_dereference(rq->context->gem_context);
 	if (ctx && !kref_get_unless_zero(&ctx->ref))
...
@@ -2088,7 +2088,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

-	if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
+	if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
 		err = gen7_ctx_switch_bb_init(engine);
 		if (err)
 			goto err_ring_unpin;
...
@@ -770,6 +770,19 @@ void intel_rps_park(struct intel_rps *rps)
 	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
 	rps_set(rps, rps->idle_freq, false);
 	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+
+	/*
+	 * Since we will try and restart from the previously requested
+	 * frequency on unparking, treat this idle point as a downclock
+	 * interrupt and reduce the frequency for resume. If we park/unpark
+	 * more frequently than the rps worker can run, we will not respond
+	 * to any EI and never see a change in frequency.
+	 *
+	 * (Note we accommodate Cherryview's limitation of only using an
+	 * even bin by applying it to all.)
+	 */
+	rps->cur_freq =
+		max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
 }

 void intel_rps_boost(struct i915_request *rq)
...
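The new park-time downclock steps the requested frequency down to the next even bin (Cherryview can only program even bins, and the same rounding is applied to all platforms for simplicity), clamped at the minimum frequency. A small worked example of that arithmetic, with round_down() and max_t() re-expressed in plain C and made-up frequency values; only the shape of the calculation is taken from the hunk above:

#include <stdio.h>

/* plain-C stand-ins for the kernel's round_down() and max_t(int, ...) */
static int round_down_to(int x, int multiple) { return x - (x % multiple); }
static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	const int min_freq = 11;	/* arbitrary lower bound, in bins */
	int cur;

	for (cur = 11; cur <= 16; cur++) {
		int parked = max_int(round_down_to(cur - 1, 2), min_freq);
		printf("cur_freq=%2d -> parked cur_freq=%2d\n", cur, parked);
	}
	/* 16 -> 14, 15 -> 14, 14 -> 12, 13 -> 12; 12 and 11 clamp to 11 */
	return 0;
}

Each park therefore nudges the resume frequency one even bin lower, so a mostly idle GPU drifts back toward min_freq even if the RPS worker never gets to process a downclock interrupt.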
@@ -12,6 +12,21 @@
 #include "selftests/i915_random.h"

+static u64 rc6_residency(struct intel_rc6 *rc6)
+{
+	u64 result;
+
+	/* XXX VLV_GT_MEDIA_RC6? */
+
+	result = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	if (HAS_RC6p(rc6_to_i915(rc6)))
+		result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6p);
+	if (HAS_RC6pp(rc6_to_i915(rc6)))
+		result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6pp);
+
+	return result;
+}
+
 int live_rc6_manual(void *arg)
 {
 	struct intel_gt *gt = arg;

@@ -38,9 +53,9 @@ int live_rc6_manual(void *arg)
 	__intel_rc6_disable(rc6);
 	msleep(1); /* wakeup is not immediate, takes about 100us on icl */

-	res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	res[0] = rc6_residency(rc6);
 	msleep(250);
-	res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	res[1] = rc6_residency(rc6);
 	if ((res[1] - res[0]) >> 10) {
 		pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
 		       (res[1] - res[0]) >> 10);

@@ -51,9 +66,9 @@ int live_rc6_manual(void *arg)
 	/* Manually enter RC6 */
 	intel_rc6_park(rc6);

-	res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	res[0] = rc6_residency(rc6);
 	msleep(100);
-	res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	res[1] = rc6_residency(rc6);
 	if (res[1] == res[0]) {
 		pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
...
@@ -2700,6 +2700,14 @@ static void gen12_oa_disable(struct i915_perf_stream *stream)
				    50))
 		drm_err(&stream->perf->i915->drm,
 			"wait for OA to be disabled timed out\n");
+
+	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
+	if (intel_wait_for_register(uncore,
+				    GEN12_OA_TLB_INV_CR,
+				    1, 0,
+				    50))
+		drm_err(&stream->perf->i915->drm,
+			"wait for OA tlb invalidate timed out\n");
 }

 /**
...
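The added step in gen12_oa_disable() follows a common "write the invalidate bit, then poll until the hardware clears it" idiom, with a bounded wait and an error message on timeout. A self-contained userspace sketch of only that polling shape; the fake register below simply self-clears after a few reads, and the helper mirrors the hunk's usage of intel_wait_for_register() returning 0 on success and non-zero on timeout:

#include <stdint.h>
#include <stdio.h>

/* toy "register": bit 0 self-clears after a few reads, mimicking a HW ack */
static uint32_t fake_tlb_inv_cr;
static int reads_until_clear = 3;

static void reg_write(uint32_t val) { fake_tlb_inv_cr = val; }
static uint32_t reg_read(void)
{
	if (fake_tlb_inv_cr && --reads_until_clear == 0)
		fake_tlb_inv_cr = 0;	/* hardware finished the invalidation */
	return fake_tlb_inv_cr;
}

/* shape of a bounded wait: 0 if (reg & mask) == value in time, else -1 */
static int wait_for_register(uint32_t mask, uint32_t value, int max_polls)
{
	while (max_polls--) {
		if ((reg_read() & mask) == value)
			return 0;
	}
	return -1;
}

int main(void)
{
	reg_write(1);				/* kick the invalidation */
	if (wait_for_register(1, 0, 50))	/* wait for bit 0 to clear */
		fprintf(stderr, "wait for OA tlb invalidate timed out\n");
	else
		printf("OA TLB invalidated\n");
	return 0;
}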
@@ -693,6 +693,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OABUFFER_SIZE_8M    (6 << 3)
 #define OABUFFER_SIZE_16M   (7 << 3)

+#define GEN12_OA_TLB_INV_CR _MMIO(0xceec)
+
 /* Gen12 OAR unit */
 #define GEN12_OAR_OACONTROL _MMIO(0x2960)
 #define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1
...
@@ -1097,6 +1097,7 @@ void i915_vma_release(struct kref *ref)
 void i915_vma_parked(struct intel_gt *gt)
 {
 	struct i915_vma *vma, *next;
+	LIST_HEAD(closed);

 	spin_lock_irq(&gt->closed_lock);
 	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {

@@ -1108,28 +1109,26 @@ void i915_vma_parked(struct intel_gt *gt)
 		if (!kref_get_unless_zero(&obj->base.refcount))
 			continue;

-		if (i915_vm_tryopen(vm)) {
-			list_del_init(&vma->closed_link);
-		} else {
+		if (!i915_vm_tryopen(vm)) {
 			i915_gem_object_put(obj);
-			obj = NULL;
+			continue;
 		}

-		spin_unlock_irq(&gt->closed_lock);
+		list_move(&vma->closed_link, &closed);
+	}
+	spin_unlock_irq(&gt->closed_lock);

-		if (obj) {
-			__i915_vma_put(vma);
-			i915_gem_object_put(obj);
-		}
+	/* As the GT is held idle, no vma can be reopened as we destroy them */
+	list_for_each_entry_safe(vma, next, &closed, closed_link) {
+		struct drm_i915_gem_object *obj = vma->obj;
+		struct i915_address_space *vm = vma->vm;

-		i915_vm_close(vm);
+		INIT_LIST_HEAD(&vma->closed_link);
+		__i915_vma_put(vma);

-		/* Restart after dropping lock */
-		spin_lock_irq(&gt->closed_lock);
-		next = list_first_entry(&gt->closed_vma,
-					typeof(*next), closed_link);
+		i915_gem_object_put(obj);
+		i915_vm_close(vm);
 	}
-	spin_unlock_irq(&gt->closed_lock);
 }
...
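The rework above removes the old "drop the lock, destroy, re-take the lock and restart from the head" loop, which could livelock if closed vmas kept being added, and replaces it with two phases: while holding closed_lock, eligible vmas are only moved onto a private list; the actual teardown then runs with the lock dropped. A compact illustration of that two-phase shape using a pthread mutex and a toy singly-linked list; unlike the real code, which filters entries under the lock (object refcount, vm tryopen), this sketch simply steals the whole list, and every name here is invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_vma {
	int id;
	struct toy_vma *next;
};

static pthread_mutex_t closed_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_vma *closed_vma;		/* protected by closed_lock */

static void toy_vma_parked(void)
{
	struct toy_vma *closed, *vma;

	/* Phase 1: under the lock, only move entries to a private list. */
	pthread_mutex_lock(&closed_lock);
	closed = closed_vma;
	closed_vma = NULL;
	pthread_mutex_unlock(&closed_lock);

	/*
	 * Phase 2: destroy with the lock dropped.  Nothing new can appear
	 * on the private list, so there is no need to restart the walk and
	 * no way to spin forever against concurrent additions.
	 */
	while ((vma = closed)) {
		closed = vma->next;
		printf("destroying vma %d\n", vma->id);
		free(vma);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct toy_vma *vma = malloc(sizeof(*vma));
		if (!vma)
			break;
		vma->id = i;
		pthread_mutex_lock(&closed_lock);
		vma->next = closed_vma;
		closed_vma = vma;
		pthread_mutex_unlock(&closed_lock);
	}
	toy_vma_parked();
	return 0;
}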