Commit 7ac98ff0 authored by Dave Airlie


Merge tag 'drm-intel-fixes-2020-06-18' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Fix for timeslicing and virtual engines/unpreemptable requests
  (+ 1 dependency patch)
- Fixes for TypeC register programming and interrupt storm detection
- Disable DIP on MST ports with the transcoder clock still on
- Avoid missing GT workarounds at reset for HSW and older gens
- Fix for unwinding multiple requests missing force restore
- Fix encoder type check for DDI vswing sequence
- Build warning fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200618124659.GA12342@jlahtine-desk.ger.corp.intel.com
parents b3a9e3b9 8e68c634
@@ -2579,14 +2579,14 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
 
 static void
 tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
-				u32 level)
+				u32 level, enum intel_output_type type)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
 	const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
 	u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
 
-	if (encoder->type == INTEL_OUTPUT_HDMI) {
+	if (type == INTEL_OUTPUT_HDMI) {
 		n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
 		ddi_translations = tgl_dkl_phy_hdmi_ddi_trans;
 	} else {
@@ -2638,7 +2638,7 @@ static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
 	if (intel_phy_is_combo(dev_priv, phy))
 		icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
 	else
-		tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
+		tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level, type);
 }
 
 static u32 translate_signal_level(struct intel_dp *intel_dp, int signal_levels)
@@ -2987,7 +2987,7 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
 		ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port));
 	}
 
-	ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X1_MODE);
+	ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
 	ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
 
 	/* DPPATC */
@@ -3472,7 +3472,9 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
 					  INTEL_OUTPUT_DP_MST);
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 
-	intel_dp_set_infoframes(encoder, false, old_crtc_state, old_conn_state);
+	if (!is_mst)
+		intel_dp_set_infoframes(encoder, false,
+					old_crtc_state, old_conn_state);
 
 	/*
 	 * Power down sink before disabling the port, otherwise we end
...
@@ -397,6 +397,14 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
 	 */
 	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
 				     false);
 
+	/*
+	 * BSpec 4287: disable DIP after the transcoder is disabled and before
+	 * the transcoder clock select is set to none.
+	 */
+	if (last_mst_stream)
+		intel_dp_set_infoframes(&intel_dig_port->base, false,
+					old_crtc_state, NULL);
+
 	/*
 	 * From TGL spec: "If multi-stream slave transcoder: Configure
 	 * Transcoder Clock Select to direct no clock to the transcoder"
...
@@ -646,7 +646,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 struct measure_breadcrumb {
 	struct i915_request rq;
 	struct intel_ring ring;
-	u32 cs[1024];
+	u32 cs[2048];
 };
 
 static int measure_breadcrumb_dw(struct intel_context *ce)
@@ -668,6 +668,8 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
 	frame->ring.vaddr = frame->cs;
 	frame->ring.size = sizeof(frame->cs);
+	frame->ring.wrap =
+		BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
 	frame->ring.effective_size = frame->ring.size;
 	intel_ring_update_space(&frame->ring);
 
 	frame->rq.ring = &frame->ring;
...
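The wrap shift initialized above is the bit width of the size type minus log2 of the ring size. A standalone arithmetic check of that formula for the enlarged 2048-dword frame ring (hypothetical helper name, not the i915 API):

#include <stdint.h>
#include <stdio.h>

/* Model of BITS_PER_TYPE(u32) - ilog2(size) for a power-of-two ring size */
static unsigned int ring_wrap_shift(uint32_t size_bytes)
{
	/* size is a power of two, so ilog2 is the index of the top set bit */
	unsigned int ilog2 = 31 - __builtin_clz(size_bytes);

	return 32 - ilog2;
}

int main(void)
{
	/* sizeof(u32 cs[2048]) == 8192 bytes -> ilog2 == 13 -> wrap == 19 */
	printf("wrap = %u\n", ring_wrap_shift(2048 * sizeof(uint32_t)));
	return 0;
}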
@@ -1134,6 +1134,13 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 			list_move(&rq->sched.link, pl);
 			set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 
+			/* Check in case we rollback so far we wrap [size/2] */
+			if (intel_ring_direction(rq->ring,
+						 intel_ring_wrap(rq->ring,
+								 rq->tail),
+						 rq->ring->tail) > 0)
+				rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+
 			active = rq;
 		} else {
 			struct intel_engine_cs *owner = rq->context->engine;
@@ -1498,8 +1505,9 @@ static u64 execlists_update_context(struct i915_request *rq)
 	 * HW has a tendency to ignore us rewinding the TAIL to the end of
 	 * an earlier request.
 	 */
-	GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
-	prev = rq->ring->tail;
 	tail = intel_ring_set_tail(rq->ring, rq->tail);
+	prev = ce->lrc_reg_state[CTX_RING_TAIL];
 	if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
 		desc |= CTX_DESC_FORCE_RESTORE;
 	ce->lrc_reg_state[CTX_RING_TAIL] = tail;
@@ -1895,7 +1903,8 @@ static void defer_active(struct intel_engine_cs *engine)
 
 static bool
 need_timeslice(const struct intel_engine_cs *engine,
-	       const struct i915_request *rq)
+	       const struct i915_request *rq,
+	       const struct rb_node *rb)
 {
 	int hint;
 
@@ -1903,9 +1912,28 @@ need_timeslice(const struct intel_engine_cs *engine,
 		return false;
 
 	hint = engine->execlists.queue_priority_hint;
+
+	if (rb) {
+		const struct virtual_engine *ve =
+			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+		const struct intel_engine_cs *inflight =
+			intel_context_inflight(&ve->context);
+
+		if (!inflight || inflight == engine) {
+			struct i915_request *next;
+
+			rcu_read_lock();
+			next = READ_ONCE(ve->request);
+			if (next)
+				hint = max(hint, rq_prio(next));
+			rcu_read_unlock();
+		}
+	}
+
 	if (!list_is_last(&rq->sched.link, &engine->active.requests))
 		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 
+	GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
 	return hint >= effective_prio(rq);
 }
 
@@ -1977,10 +2005,9 @@ static void set_timeslice(struct intel_engine_cs *engine)
 	set_timer_ms(&engine->execlists.timer, duration);
 }
 
-static void start_timeslice(struct intel_engine_cs *engine)
+static void start_timeslice(struct intel_engine_cs *engine, int prio)
 {
 	struct intel_engine_execlists *execlists = &engine->execlists;
-	const int prio = queue_prio(execlists);
 	unsigned long duration;
 
 	if (!intel_engine_has_timeslices(engine))
@@ -2140,7 +2167,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			__unwind_incomplete_requests(engine);
 
 			last = NULL;
-		} else if (need_timeslice(engine, last) &&
+		} else if (need_timeslice(engine, last, rb) &&
 			   timeslice_expired(execlists, last)) {
 			if (i915_request_completed(last)) {
 				tasklet_hi_schedule(&execlists->tasklet);
@@ -2188,7 +2215,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				/*
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
 				 */
-				start_timeslice(engine);
+				start_timeslice(engine, queue_prio(execlists));
 				return;
 			}
 		}
@@ -2223,7 +2250,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			if (last && !can_merge_rq(last, rq)) {
 				spin_unlock(&ve->base.active.lock);
-				start_timeslice(engine);
+				start_timeslice(engine, rq_prio(rq));
 				return; /* leave this for another sibling */
 			}
@@ -4739,6 +4766,14 @@ static int gen12_emit_flush(struct i915_request *request, u32 mode)
 	return 0;
 }
 
+static void assert_request_valid(struct i915_request *rq)
+{
+	struct intel_ring *ring __maybe_unused = rq->ring;
+
+	/* Can we unwind this request without appearing to go forwards? */
+	GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
+}
+
 /*
  * Reserve space for 2 NOOPs at the end of each request to be
  * used as a workaround for not being allowed to do lite
@@ -4751,6 +4786,9 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
 	*cs++ = MI_NOOP;
 
 	request->wa_tail = intel_ring_offset(request, cs);
 
+	/* Check that entire request is less than half the ring */
+	assert_request_valid(request);
+
 	return cs;
 }
...
@@ -315,3 +315,7 @@ int intel_ring_cacheline_align(struct i915_request *rq)
 	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
 	return 0;
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_ring.c"
+#endif
@@ -178,6 +178,12 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
 	wa_write_masked_or(wal, reg, set, set);
 }
 
+static void
+wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
+{
+	wa_write_masked_or(wal, reg, clr, 0);
+}
+
 static void
 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
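For context, wa_write_masked_or() records a (clear-mask, set-value) pair that is later applied as a read-modify-write; the new helper passes set = 0 so the entry purely clears bits. A minimal model of that application step (illustrative names, not the driver's code):

#include <assert.h>
#include <stdint.h>

/* Model of applying one workaround entry: clear 'mask', then OR in 'val' */
static uint32_t apply_wa_entry(uint32_t old, uint32_t mask, uint32_t val)
{
	return (old & ~mask) | val;
}

int main(void)
{
	/* wa_write_clr(wal, reg, BIT(5)) would record mask = BIT(5), val = 0 ... */
	uint32_t reg = 0xffffffff;

	reg = apply_wa_entry(reg, 1u << 5, 0);
	assert(reg == 0xffffffdf); /* ... so only bit 5 is cleared */
	return 0;
}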
@@ -686,6 +692,227 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
 	return 0;
 }
 
+static void
+gen4_gt_workarounds_init(struct drm_i915_private *i915,
+			 struct i915_wa_list *wal)
+{
+	/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
+	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+}
+
+static void
+g4x_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+	gen4_gt_workarounds_init(i915, wal);
+
+	/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
+	wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
+}
+
+static void
+ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+	g4x_gt_workarounds_init(i915, wal);
+
+	wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
+}
+
+static void
+snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+	wa_masked_en(wal,
+		     _3D_CHICKEN,
+		     _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
+
+	/* WaDisable_RenderCache_OperationalFlush:snb */
+	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	wa_add(wal,
+	       GEN6_GT_MODE, 0,
+	       _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+	       GEN6_WIZ_HASHING_16x4);
+
+	wa_masked_dis(wal, CACHE_MODE_0, CM0_STC_EVICT_DISABLE_LRA_SNB);
+
+	wa_masked_en(wal,
+		     _3D_CHICKEN3,
+		     /* WaStripsFansDisableFastClipPerformanceFix:snb */
+		     _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
+		     /*
+		      * Bspec says:
+		      * "This bit must be set if 3DSTATE_CLIP clip mode is set
+		      * to normal and 3DSTATE_SF number of SF output attributes
+		      * is more than 16."
+		      */
+		     _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
+}
+
+static void
+ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+	/* WaDisableEarlyCull:ivb */
+	wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+	/* WaDisablePSDDualDispatchEnable:ivb */
+	if (IS_IVB_GT1(i915))
+		wa_masked_en(wal,
+			     GEN7_HALF_SLICE_CHICKEN1,
+			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+
+	/* WaDisable_RenderCache_OperationalFlush:ivb */
+	wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
+	wa_masked_dis(wal,
+		      GEN7_COMMON_SLICE_CHICKEN1,
+		      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+	/* WaApplyL3ControlAndL3ChickenMode:ivb */
+	wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+	wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+
+	/* WaForceL3Serialization:ivb */
+	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+	/*
+	 * WaVSThreadDispatchOverride:ivb,vlv
+	 *
+	 * This actually overrides the dispatch
+	 * mode for all thread types.
+	 */
+	wa_write_masked_or(wal, GEN7_FF_THREAD_MODE,
+			   GEN7_FF_SCHED_MASK,
+			   GEN7_FF_TS_SCHED_HW |
+			   GEN7_FF_VS_SCHED_HW |
+			   GEN7_FF_DS_SCHED_HW);
+
+	if (0) { /* causes HiZ corruption on ivb:gt1 */
+		/* enable HiZ Raw Stall Optimization */
+		wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+	}
+
+	/* WaDisable4x2SubspanOptimization:ivb */
+	wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	wa_add(wal, GEN7_GT_MODE, 0,
+	       _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+	       GEN6_WIZ_HASHING_16x4);
+}
+
+static void
+vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+	/* WaDisableEarlyCull:vlv */
+	wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+	/* WaPsdDispatchEnable:vlv */
+	/* WaDisablePSDDualDispatchEnable:vlv */
+	wa_masked_en(wal,
+		     GEN7_HALF_SLICE_CHICKEN1,
+		     GEN7_MAX_PS_THREAD_DEP |
+		     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+
+	/* WaDisable_RenderCache_OperationalFlush:vlv */
+	wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+	/* WaForceL3Serialization:vlv */
+	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+	/*
+	 * WaVSThreadDispatchOverride:ivb,vlv
+	 *
+	 * This actually overrides the dispatch
+	 * mode for all thread types.
+	 */
+	wa_write_masked_or(wal,
+			   GEN7_FF_THREAD_MODE,
+			   GEN7_FF_SCHED_MASK,
+			   GEN7_FF_TS_SCHED_HW |
+			   GEN7_FF_VS_SCHED_HW |
+			   GEN7_FF_DS_SCHED_HW);
+
+	/*
+	 * BSpec says this must be set, even though
+	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
+	 */
+	wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	wa_add(wal, GEN7_GT_MODE, 0,
+	       _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+	       GEN6_WIZ_HASHING_16x4);
+
+	/*
+	 * WaIncreaseL3CreditsForVLVB0:vlv
+	 * This is the hardware default actually.
+	 */
+	wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
+}
+
+static void
+hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+	/* L3 caching of data atomics doesn't work -- disable it. */
+	wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+
+	wa_add(wal,
+	       HSW_ROW_CHICKEN3, 0,
+	       _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
+	       0 /* XXX does this reg exist? */);
+
+	/* WaVSRefCountFullforceMissDisable:hsw */
+	wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
+
+	wa_masked_dis(wal,
+		      CACHE_MODE_0_GEN7,
+		      /* WaDisable_RenderCache_OperationalFlush:hsw */
+		      RC_OP_FLUSH_ENABLE |
+		      /* enable HiZ Raw Stall Optimization */
+		      HIZ_RAW_STALL_OPT_DISABLE);
+
+	/* WaDisable4x2SubspanOptimization:hsw */
+	wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	wa_add(wal, GEN7_GT_MODE, 0,
+	       _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+	       GEN6_WIZ_HASHING_16x4);
+
+	/* WaSampleCChickenBitEnable:hsw */
+	wa_masked_en(wal, HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
+}
+
 static void
 gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
@@ -963,6 +1190,20 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
 		bxt_gt_workarounds_init(i915, wal);
 	else if (IS_SKYLAKE(i915))
 		skl_gt_workarounds_init(i915, wal);
+	else if (IS_HASWELL(i915))
+		hsw_gt_workarounds_init(i915, wal);
+	else if (IS_VALLEYVIEW(i915))
+		vlv_gt_workarounds_init(i915, wal);
+	else if (IS_IVYBRIDGE(i915))
+		ivb_gt_workarounds_init(i915, wal);
+	else if (IS_GEN(i915, 6))
+		snb_gt_workarounds_init(i915, wal);
+	else if (IS_GEN(i915, 5))
+		ilk_gt_workarounds_init(i915, wal);
+	else if (IS_G4X(i915))
+		g4x_gt_workarounds_init(i915, wal);
+	else if (IS_GEN(i915, 4))
+		gen4_gt_workarounds_init(i915, wal);
 	else if (INTEL_GEN(i915) <= 8)
 		return;
 	else
...
@@ -310,22 +310,20 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq)
 			  1000));
 }
 
-static void engine_heartbeat_disable(struct intel_engine_cs *engine,
-				     unsigned long *saved)
+static void engine_heartbeat_disable(struct intel_engine_cs *engine)
 {
-	*saved = engine->props.heartbeat_interval_ms;
 	engine->props.heartbeat_interval_ms = 0;
 
 	intel_engine_pm_get(engine);
 	intel_engine_park_heartbeat(engine);
 }
 
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
-				    unsigned long saved)
+static void engine_heartbeat_enable(struct intel_engine_cs *engine)
 {
 	intel_engine_pm_put(engine);
 
-	engine->props.heartbeat_interval_ms = saved;
+	engine->props.heartbeat_interval_ms =
+		engine->defaults.heartbeat_interval_ms;
 }
 
 static int igt_hang_sanitycheck(void *arg)
@@ -473,7 +471,6 @@ static int igt_reset_nop_engine(void *arg)
 	for_each_engine(engine, gt, id) {
 		unsigned int reset_count, reset_engine_count, count;
 		struct intel_context *ce;
-		unsigned long heartbeat;
 		IGT_TIMEOUT(end_time);
 		int err;
 
@@ -485,7 +482,7 @@ static int igt_reset_nop_engine(void *arg)
 		reset_engine_count = i915_reset_engine_count(global, engine);
 		count = 0;
 
-		engine_heartbeat_disable(engine, &heartbeat);
+		engine_heartbeat_disable(engine);
 		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 		do {
 			int i;
@@ -529,7 +526,7 @@ static int igt_reset_nop_engine(void *arg)
 			}
 		} while (time_before(jiffies, end_time));
 		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-		engine_heartbeat_enable(engine, heartbeat);
+		engine_heartbeat_enable(engine);
 
 		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
 
@@ -564,7 +561,6 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 	for_each_engine(engine, gt, id) {
 		unsigned int reset_count, reset_engine_count;
-		unsigned long heartbeat;
 		IGT_TIMEOUT(end_time);
 
 		if (active && !intel_engine_can_store_dword(engine))
@@ -580,7 +576,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 		reset_count = i915_reset_count(global);
 		reset_engine_count = i915_reset_engine_count(global, engine);
 
-		engine_heartbeat_disable(engine, &heartbeat);
+		engine_heartbeat_disable(engine);
 		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 		do {
 			if (active) {
@@ -632,7 +628,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 			}
 		} while (time_before(jiffies, end_time));
 		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-		engine_heartbeat_enable(engine, heartbeat);
+		engine_heartbeat_enable(engine);
 
 		if (err)
 			break;
@@ -789,7 +785,6 @@ static int __igt_reset_engines(struct intel_gt *gt,
 		struct active_engine threads[I915_NUM_ENGINES] = {};
 		unsigned long device = i915_reset_count(global);
 		unsigned long count = 0, reported;
-		unsigned long heartbeat;
 		IGT_TIMEOUT(end_time);
 
 		if (flags & TEST_ACTIVE &&
@@ -832,7 +827,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 		yield(); /* start all threads before we begin */
 
-		engine_heartbeat_disable(engine, &heartbeat);
+		engine_heartbeat_disable(engine);
 		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 		do {
 			struct i915_request *rq = NULL;
@@ -906,7 +901,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 			}
 		} while (time_before(jiffies, end_time));
 		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-		engine_heartbeat_enable(engine, heartbeat);
+		engine_heartbeat_enable(engine);
 
 		pr_info("i915_reset_engine(%s:%s): %lu resets\n",
 			engine->name, test_name, count);
...
@@ -18,6 +18,20 @@ struct live_mocs {
 	void *vaddr;
 };
 
+static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return ce;
+
+	/* We build large requests to read the registers from the ring */
+	ce->ring = __intel_context_ring_size(SZ_16K);
+
+	return ce;
+}
+
 static int request_add_sync(struct i915_request *rq, int err)
 {
 	i915_request_get(rq);
@@ -301,7 +315,7 @@ static int live_mocs_clean(void *arg)
 	for_each_engine(engine, gt, id) {
 		struct intel_context *ce;
 
-		ce = intel_context_create(engine);
+		ce = mocs_context_create(engine);
 		if (IS_ERR(ce)) {
 			err = PTR_ERR(ce);
 			break;
@@ -395,7 +409,7 @@ static int live_mocs_reset(void *arg)
 	for_each_engine(engine, gt, id) {
 		struct intel_context *ce;
 
-		ce = intel_context_create(engine);
+		ce = mocs_context_create(engine);
 		if (IS_ERR(ce)) {
 			err = PTR_ERR(ce);
 			break;
...
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2020 Intel Corporation
 */

static struct intel_ring *mock_ring(unsigned long sz)
{
	struct intel_ring *ring;

	ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
	if (!ring)
		return NULL;

	kref_init(&ring->ref);
	ring->size = sz;
	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(sz);
	ring->effective_size = sz;
	ring->vaddr = (void *)(ring + 1);
	atomic_set(&ring->pin_count, 1);

	intel_ring_update_space(ring);

	return ring;
}

static void mock_ring_free(struct intel_ring *ring)
{
	kfree(ring);
}

static int check_ring_direction(struct intel_ring *ring,
				u32 next, u32 prev,
				int expected)
{
	int result;

	result = intel_ring_direction(ring, next, prev);
	if (result < 0)
		result = -1;
	else if (result > 0)
		result = 1;

	if (result != expected) {
		pr_err("intel_ring_direction(%u, %u):%d != %d\n",
		       next, prev, result, expected);
		return -EINVAL;
	}

	return 0;
}

static int check_ring_step(struct intel_ring *ring, u32 x, u32 step)
{
	u32 prev = x, next = intel_ring_wrap(ring, x + step);
	int err = 0;

	err |= check_ring_direction(ring, next, next,  0);
	err |= check_ring_direction(ring, prev, prev,  0);
	err |= check_ring_direction(ring, next, prev,  1);
	err |= check_ring_direction(ring, prev, next, -1);

	return err;
}

static int check_ring_offset(struct intel_ring *ring, u32 x, u32 step)
{
	int err = 0;

	err |= check_ring_step(ring, x, step);
	err |= check_ring_step(ring, intel_ring_wrap(ring, x + 1), step);
	err |= check_ring_step(ring, intel_ring_wrap(ring, x - 1), step);

	return err;
}

static int igt_ring_direction(void *dummy)
{
	struct intel_ring *ring;
	unsigned int half = 2048;
	int step, err = 0;

	ring = mock_ring(2 * half);
	if (!ring)
		return -ENOMEM;

	GEM_BUG_ON(ring->size != 2 * half);

	/* Precision of wrap detection is limited to ring->size / 2 */
	for (step = 1; step < half; step <<= 1) {
		err |= check_ring_offset(ring, 0, step);
		err |= check_ring_offset(ring, half, step);
	}
	err |= check_ring_step(ring, 0, half - 64);

	/* And check unwrapped handling for good measure */
	err |= check_ring_offset(ring, 0, 2 * half + 64);
	err |= check_ring_offset(ring, 3 * half, 1);

	mock_ring_free(ring);

	return err;
}

int intel_ring_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ring_direction),
	};

	return i915_subtests(tests, NULL);
}
@@ -20,24 +20,20 @@
 /* Try to isolate the impact of cstates from determing frequency response */
 #define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
 
-static unsigned long engine_heartbeat_disable(struct intel_engine_cs *engine)
+static void engine_heartbeat_disable(struct intel_engine_cs *engine)
 {
-	unsigned long old;
-
-	old = fetch_and_zero(&engine->props.heartbeat_interval_ms);
+	engine->props.heartbeat_interval_ms = 0;
 
 	intel_engine_pm_get(engine);
 	intel_engine_park_heartbeat(engine);
-
-	return old;
 }
 
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
-				    unsigned long saved)
+static void engine_heartbeat_enable(struct intel_engine_cs *engine)
 {
 	intel_engine_pm_put(engine);
 
-	engine->props.heartbeat_interval_ms = saved;
+	engine->props.heartbeat_interval_ms =
+		engine->defaults.heartbeat_interval_ms;
 }
 
 static void dummy_rps_work(struct work_struct *wrk)
@@ -246,7 +242,6 @@ int live_rps_clock_interval(void *arg)
 	intel_gt_check_clock_frequency(gt);
 
 	for_each_engine(engine, gt, id) {
-		unsigned long saved_heartbeat;
 		struct i915_request *rq;
 		u32 cycles;
 		u64 dt;
@@ -254,13 +249,13 @@ int live_rps_clock_interval(void *arg)
 		if (!intel_engine_can_store_dword(engine))
 			continue;
 
-		saved_heartbeat = engine_heartbeat_disable(engine);
+		engine_heartbeat_disable(engine);
 
 		rq = igt_spinner_create_request(&spin,
 						engine->kernel_context,
 						MI_NOOP);
 		if (IS_ERR(rq)) {
-			engine_heartbeat_enable(engine, saved_heartbeat);
+			engine_heartbeat_enable(engine);
 			err = PTR_ERR(rq);
 			break;
 		}
@@ -271,7 +266,7 @@ int live_rps_clock_interval(void *arg)
 			pr_err("%s: RPS spinner did not start\n",
 			       engine->name);
 			igt_spinner_end(&spin);
-			engine_heartbeat_enable(engine, saved_heartbeat);
+			engine_heartbeat_enable(engine);
 			intel_gt_set_wedged(engine->gt);
 			err = -EIO;
 			break;
@@ -327,7 +322,7 @@ int live_rps_clock_interval(void *arg)
 		intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
 		igt_spinner_end(&spin);
 
-		engine_heartbeat_enable(engine, saved_heartbeat);
+		engine_heartbeat_enable(engine);
 
 		if (err == 0) {
 			u64 time = intel_gt_pm_interval_to_ns(gt, cycles);
@@ -405,7 +400,6 @@ int live_rps_control(void *arg)
 	intel_gt_pm_get(gt);
 	for_each_engine(engine, gt, id) {
-		unsigned long saved_heartbeat;
 		struct i915_request *rq;
 		ktime_t min_dt, max_dt;
 		int f, limit;
@@ -414,7 +408,7 @@ int live_rps_control(void *arg)
 		if (!intel_engine_can_store_dword(engine))
 			continue;
 
-		saved_heartbeat = engine_heartbeat_disable(engine);
+		engine_heartbeat_disable(engine);
 
 		rq = igt_spinner_create_request(&spin,
 						engine->kernel_context,
@@ -430,7 +424,7 @@ int live_rps_control(void *arg)
 			pr_err("%s: RPS spinner did not start\n",
 			       engine->name);
 			igt_spinner_end(&spin);
-			engine_heartbeat_enable(engine, saved_heartbeat);
+			engine_heartbeat_enable(engine);
 			intel_gt_set_wedged(engine->gt);
 			err = -EIO;
 			break;
@@ -440,7 +434,7 @@ int live_rps_control(void *arg)
 			pr_err("%s: could not set minimum frequency [%x], only %x!\n",
 			       engine->name, rps->min_freq, read_cagf(rps));
 			igt_spinner_end(&spin);
-			engine_heartbeat_enable(engine, saved_heartbeat);
+			engine_heartbeat_enable(engine);
 			show_pstate_limits(rps);
 			err = -EINVAL;
 			break;
@@ -457,7 +451,7 @@ int live_rps_control(void *arg)
 			pr_err("%s: could not restore minimum frequency [%x], only %x!\n",
 			       engine->name, rps->min_freq, read_cagf(rps));
 			igt_spinner_end(&spin);
-			engine_heartbeat_enable(engine, saved_heartbeat);
+			engine_heartbeat_enable(engine);
 			show_pstate_limits(rps);
 			err = -EINVAL;
 			break;
@@ -472,7 +466,7 @@ int live_rps_control(void *arg)
 		min_dt = ktime_sub(ktime_get(), min_dt);
 
 		igt_spinner_end(&spin);
-		engine_heartbeat_enable(engine, saved_heartbeat);
+		engine_heartbeat_enable(engine);
 
 		pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n",
 			engine->name,
@@ -635,7 +629,6 @@ int live_rps_frequency_cs(void *arg)
 	rps->work.func = dummy_rps_work;
 
 	for_each_engine(engine, gt, id) {
-		unsigned long saved_heartbeat;
 		struct i915_request *rq;
 		struct i915_vma *vma;
 		u32 *cancel, *cntr;
@@ -644,14 +637,14 @@ int live_rps_frequency_cs(void *arg)
 			int freq;
 		} min, max;
 
-		saved_heartbeat = engine_heartbeat_disable(engine);
+		engine_heartbeat_disable(engine);
 
 		vma = create_spin_counter(engine,
 					  engine->kernel_context->vm, false,
 					  &cancel, &cntr);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
-			engine_heartbeat_enable(engine, saved_heartbeat);
+			engine_heartbeat_enable(engine);
 			break;
 		}
@@ -732,7 +725,7 @@ int live_rps_frequency_cs(void *arg)
 		i915_vma_unpin(vma);
 		i915_vma_put(vma);
 
-		engine_heartbeat_enable(engine, saved_heartbeat);
+		engine_heartbeat_enable(engine);
 		if (igt_flush_test(gt->i915))
 			err = -EIO;
 		if (err)
@@ -778,7 +771,6 @@ int live_rps_frequency_srm(void *arg)
 	rps->work.func = dummy_rps_work;
 
 	for_each_engine(engine, gt, id) {
-		unsigned long saved_heartbeat;
 		struct i915_request *rq;
 		struct i915_vma *vma;
 		u32 *cancel, *cntr;
@@ -787,14 +779,14 @@ int live_rps_frequency_srm(void *arg)
 			int freq;
 		} min, max;
 
-		saved_heartbeat = engine_heartbeat_disable(engine);
+		engine_heartbeat_disable(engine);
 
 		vma = create_spin_counter(engine,
 					  engine->kernel_context->vm, true,
 					  &cancel, &cntr);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
-			engine_heartbeat_enable(engine, saved_heartbeat);
+			engine_heartbeat_enable(engine);
 			break;
 		}
@@ -874,7 +866,7 @@ int live_rps_frequency_srm(void *arg)
 		i915_vma_unpin(vma);
 		i915_vma_put(vma);
 
-		engine_heartbeat_enable(engine, saved_heartbeat);
+		engine_heartbeat_enable(engine);
 		if (igt_flush_test(gt->i915))
 			err = -EIO;
 		if (err)
@@ -1066,16 +1058,14 @@ int live_rps_interrupt(void *arg)
 	for_each_engine(engine, gt, id) {
 		/* Keep the engine busy with a spinner; expect an UP! */
 		if (pm_events & GEN6_PM_RP_UP_THRESHOLD) {
-			unsigned long saved_heartbeat;
-
 			intel_gt_pm_wait_for_idle(engine->gt);
 			GEM_BUG_ON(intel_rps_is_active(rps));
 
-			saved_heartbeat = engine_heartbeat_disable(engine);
+			engine_heartbeat_disable(engine);
 
 			err = __rps_up_interrupt(rps, engine, &spin);
 
-			engine_heartbeat_enable(engine, saved_heartbeat);
+			engine_heartbeat_enable(engine);
 			if (err)
 				goto out;
@@ -1084,15 +1074,13 @@ int live_rps_interrupt(void *arg)
 
 		/* Keep the engine awake but idle and check for DOWN */
 		if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) {
-			unsigned long saved_heartbeat;
-
-			saved_heartbeat = engine_heartbeat_disable(engine);
+			engine_heartbeat_disable(engine);
 			intel_rc6_disable(&gt->rc6);
 
 			err = __rps_down_interrupt(rps, engine);
 
 			intel_rc6_enable(&gt->rc6);
-			engine_heartbeat_enable(engine, saved_heartbeat);
+			engine_heartbeat_enable(engine);
 			if (err)
 				goto out;
 		}
@@ -1168,7 +1156,6 @@ int live_rps_power(void *arg)
 	rps->work.func = dummy_rps_work;
 
 	for_each_engine(engine, gt, id) {
-		unsigned long saved_heartbeat;
 		struct i915_request *rq;
 		struct {
 			u64 power;
@@ -1178,13 +1165,13 @@ int live_rps_power(void *arg)
 		if (!intel_engine_can_store_dword(engine))
 			continue;
 
-		saved_heartbeat = engine_heartbeat_disable(engine);
+		engine_heartbeat_disable(engine);
 
 		rq = igt_spinner_create_request(&spin,
 						engine->kernel_context,
 						MI_NOOP);
 		if (IS_ERR(rq)) {
-			engine_heartbeat_enable(engine, saved_heartbeat);
+			engine_heartbeat_enable(engine);
 			err = PTR_ERR(rq);
 			break;
 		}
@@ -1195,7 +1182,7 @@ int live_rps_power(void *arg)
 			pr_err("%s: RPS spinner did not start\n",
 			       engine->name);
 			igt_spinner_end(&spin);
-			engine_heartbeat_enable(engine, saved_heartbeat);
+			engine_heartbeat_enable(engine);
 			intel_gt_set_wedged(engine->gt);
 			err = -EIO;
 			break;
@@ -1208,7 +1195,7 @@ int live_rps_power(void *arg)
 		min.power = measure_power_at(rps, &min.freq);
 
 		igt_spinner_end(&spin);
-		engine_heartbeat_enable(engine, saved_heartbeat);
+		engine_heartbeat_enable(engine);
 
 		pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
 			engine->name,
...
@@ -751,22 +751,20 @@ static int live_hwsp_wrap(void *arg)
 	return err;
 }
 
-static void engine_heartbeat_disable(struct intel_engine_cs *engine,
-				     unsigned long *saved)
+static void engine_heartbeat_disable(struct intel_engine_cs *engine)
 {
-	*saved = engine->props.heartbeat_interval_ms;
 	engine->props.heartbeat_interval_ms = 0;
 
 	intel_engine_pm_get(engine);
 	intel_engine_park_heartbeat(engine);
 }
 
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
-				    unsigned long saved)
+static void engine_heartbeat_enable(struct intel_engine_cs *engine)
 {
 	intel_engine_pm_put(engine);
 
-	engine->props.heartbeat_interval_ms = saved;
+	engine->props.heartbeat_interval_ms =
+		engine->defaults.heartbeat_interval_ms;
 }
 
 static int live_hwsp_rollover_kernel(void *arg)
@@ -785,10 +783,9 @@ static int live_hwsp_rollover_kernel(void *arg)
 		struct intel_context *ce = engine->kernel_context;
 		struct intel_timeline *tl = ce->timeline;
 		struct i915_request *rq[3] = {};
-		unsigned long heartbeat;
 		int i;
 
-		engine_heartbeat_disable(engine, &heartbeat);
+		engine_heartbeat_disable(engine);
 		if (intel_gt_wait_for_idle(gt, HZ / 2)) {
 			err = -EIO;
 			goto out;
@@ -839,7 +836,7 @@ static int live_hwsp_rollover_kernel(void *arg)
 out:
 		for (i = 0; i < ARRAY_SIZE(rq); i++)
 			i915_request_put(rq[i]);
-		engine_heartbeat_enable(engine, heartbeat);
+		engine_heartbeat_enable(engine);
 		if (err)
 			break;
 	}
...
@@ -623,6 +623,8 @@ static int check_dirty_whitelist(struct intel_context *ce)
 				err = -EINVAL;
 				goto out_unpin;
 			}
+		} else {
+			rsvd = 0;
 		}
 
 		expect = results[0];
...
@@ -3125,6 +3125,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
 
 	val = I915_READ(GEN11_DE_HPD_IMR);
 	val &= ~hotplug_irqs;
+	val |= ~enabled_irqs & hotplug_irqs;
 	I915_WRITE(GEN11_DE_HPD_IMR, val);
 	POSTING_READ(GEN11_DE_HPD_IMR);
...
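The added line re-masks the hotplug lines that were not actually requested, which is the TypeC interrupt-storm fix from the pull summary. A small 8-bit walkthrough of the same bit arithmetic (values made up for illustration):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t imr = 0xff;          /* all interrupts masked */
	uint8_t hotplug_irqs = 0x0f; /* hotplug bits this setup owns */
	uint8_t enabled_irqs = 0x05; /* bits actually requested */

	imr &= ~hotplug_irqs;                /* unmask every owned bit: 0xf0 */
	imr |= ~enabled_irqs & hotplug_irqs; /* re-mask the unrequested ones */
	assert(imr == 0xfa);                 /* only bits 0 and 2 stay unmasked */
	return 0;
}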
@@ -269,39 +269,15 @@ static bool exclusive_mmio_access(const struct drm_i915_private *i915)
 	return IS_GEN(i915, 7);
 }
 
-static void
-engines_sample(struct intel_gt *gt, unsigned int period_ns)
+static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
 {
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
-		return;
-
-	if (!intel_gt_pm_is_awake(gt))
-		return;
-
-	for_each_engine(engine, gt, id) {
 	struct intel_engine_pmu *pmu = &engine->pmu;
-	spinlock_t *mmio_lock;
-	unsigned long flags;
 	bool busy;
 	u32 val;
 
-	if (!intel_engine_pm_get_if_awake(engine))
-		continue;
-
-	mmio_lock = NULL;
-	if (exclusive_mmio_access(i915))
-		mmio_lock = &engine->uncore->lock;
-
-	if (unlikely(mmio_lock))
-		spin_lock_irqsave(mmio_lock, flags);
-
 	val = ENGINE_READ_FW(engine, RING_CTL);
 	if (val == 0) /* powerwell off => engine idle */
-		goto skip;
+		return;
 
 	if (val & RING_WAIT)
 		add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
@@ -310,7 +286,7 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
 
 	/* No need to sample when busy stats are supported. */
 	if (intel_engine_supports_stats(engine))
-		goto skip;
+		return;
 
 	/*
 	 * While waiting on a semaphore or event, MI_MODE reports the
@@ -326,10 +302,34 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
 	}
 
 	if (busy)
 		add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+}
+
+static void
+engines_sample(struct intel_gt *gt, unsigned int period_ns)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned long flags;
+
+	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
+		return;
+
+	if (!intel_gt_pm_is_awake(gt))
+		return;
+
+	for_each_engine(engine, gt, id) {
+		if (!intel_engine_pm_get_if_awake(engine))
+			continue;
+
+		if (exclusive_mmio_access(i915)) {
+			spin_lock_irqsave(&engine->uncore->lock, flags);
+			engine_sample(engine, period_ns);
+			spin_unlock_irqrestore(&engine->uncore->lock, flags);
+		} else {
+			engine_sample(engine, period_ns);
+		}
 
-skip:
-		if (unlikely(mmio_lock))
-			spin_unlock_irqrestore(mmio_lock, flags);
-
 		intel_engine_pm_put_async(engine);
 	}
 }
...
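The refactor hoists the conditional mmio lock out of the per-register sampling body, so the lock and its irq flags are only live while held (which also addresses the build warnings mentioned in the pull summary). The shape of that pattern, sketched with pthreads in place of the driver's spinlock (all names here are illustrative):

#include <pthread.h>

static pthread_mutex_t mmio_lock = PTHREAD_MUTEX_INITIALIZER;

/* Per-engine body: an early return replaces the old 'goto skip' unwind */
static void engine_sample_model(int engine_id, unsigned int period_ns)
{
	/* read and accumulate this engine's counters here */
	(void)engine_id;
	(void)period_ns;
}

static void engines_sample_model(int nengines, unsigned int period_ns, int exclusive)
{
	for (int id = 0; id < nengines; id++) {
		if (exclusive) { /* e.g. gen7: serialize all mmio access */
			pthread_mutex_lock(&mmio_lock);
			engine_sample_model(id, period_ns);
			pthread_mutex_unlock(&mmio_lock);
		} else {
			engine_sample_model(id, period_ns);
		}
	}
}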
@@ -42,7 +42,7 @@ enum {
  * active request.
  */
 #define I915_PRIORITY_UNPREEMPTABLE INT_MAX
-#define I915_PRIORITY_BARRIER INT_MAX
+#define I915_PRIORITY_BARRIER (I915_PRIORITY_UNPREEMPTABLE - 1)
 
 struct i915_priolist {
 	struct list_head requests[I915_PRIORITY_COUNT];
...
@@ -7896,7 +7896,7 @@ enum {
 
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1		_MMIO(0x7010)
-  #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC	((1 << 10) | (1 << 26))
+  #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC	(1 << 10)
   #define GEN9_RHWO_OPTIMIZATION_DISABLE	(1 << 14)
 
 #define COMMON_SLICE_CHICKEN2			_MMIO(0x7014)
...
@@ -21,6 +21,7 @@ selftest(fence, i915_sw_fence_mock_selftests)
 selftest(scatterlist, scatterlist_mock_selftests)
 selftest(syncmap, i915_syncmap_mock_selftests)
 selftest(uncore, intel_uncore_mock_selftests)
+selftest(ring, intel_ring_mock_selftests)
 selftest(engine, intel_engine_cs_mock_selftests)
 selftest(timelines, intel_timeline_mock_selftests)
 selftest(requests, i915_request_mock_selftests)
...