Commit 96980844 authored by Dave Airlie's avatar Dave Airlie

Merge tag 'drm-intel-fixes-2017-12-07' of...

Merge tag 'drm-intel-fixes-2017-12-07' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Fix for fd.o bug #103997 CNL eDP + HDMI causing a machine hard hang (James)
- Fix to allow suspending with a wedged GPU to hopefully unwedge it (Chris)
- Fix for Gen2 vblank timestamp/frame counter jumps (Ville)
- Revert of a W/A for enabling FBC on CNL/GLK for certain images
  and sizes (Rodrigo)
- Lockdep fix for i915 userptr code (Chris)

gvt-fixes-2017-12-06

- Fix invalid hw reg read value for vGPU (Xiong)
- Fix qemu warning on PCI ROM bar missing (Changbin)
- Workaround preemption regression (Zhenyu)

* tag 'drm-intel-fixes-2017-12-07' of git://anongit.freedesktop.org/drm/drm-intel:
  Revert "drm/i915: Display WA #1133 WaFbcSkipSegments:cnl, glk"
  drm/i915: Call i915_gem_init_userptr() before taking struct_mutex
  drm/i915/gvt: set max priority for gvt context
  drm/i915/gvt: Don't mark vgpu context as inactive when preempted
  drm/i915/gvt: Limit read hw reg to active vgpu
  drm/i915/gvt: Export intel_gvt_render_mmio_to_ring_id()
  drm/i915/gvt: Emulate PCI expansion ROM base address register
  drm/i915/cnl: Mask previous DDI - PLL mapping
  drm/i915: Fix vblank timestamp/frame counter jumps on gen2
  drm/i915: Skip switch-to-kernel-context on suspend when wedged
parents c2ef3a67 d85936ab
...@@ -208,6 +208,20 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, ...@@ -208,6 +208,20 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
return 0; return 0;
} }
static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
u32 new = *(u32 *)(p_data);
if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
/* We don't have rom, return size of 0. */
*pval = 0;
else
vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
return 0;
}
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
...@@ -300,6 +314,11 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -300,6 +314,11 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
} }
switch (rounddown(offset, 4)) { switch (rounddown(offset, 4)) {
case PCI_ROM_ADDRESS:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5: case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
if (WARN_ON(!IS_ALIGNED(offset, 4))) if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL; return -EINVAL;
...@@ -375,6 +394,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, ...@@ -375,6 +394,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
pci_resource_len(gvt->dev_priv->drm.pdev, 0); pci_resource_len(gvt->dev_priv->drm.pdev, 0);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size = vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
pci_resource_len(gvt->dev_priv->drm.pdev, 2); pci_resource_len(gvt->dev_priv->drm.pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
} }
/** /**
......
...@@ -137,17 +137,26 @@ static int new_mmio_info(struct intel_gvt *gvt, ...@@ -137,17 +137,26 @@ static int new_mmio_info(struct intel_gvt *gvt,
return 0; return 0;
} }
static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg) /**
* intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
* Ring ID on success, negative error code if failed.
*/
int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
unsigned int offset)
{ {
enum intel_engine_id id; enum intel_engine_id id;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
reg &= ~GENMASK(11, 0); offset &= ~GENMASK(11, 0);
for_each_engine(engine, gvt->dev_priv, id) { for_each_engine(engine, gvt->dev_priv, id) {
if (engine->mmio_base == reg) if (engine->mmio_base == offset)
return id; return id;
} }
return -1; return -ENODEV;
} }
#define offset_to_fence_num(offset) \ #define offset_to_fence_num(offset) \
...@@ -1398,18 +1407,36 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -1398,18 +1407,36 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
static int mmio_read_from_hw(struct intel_vgpu *vgpu, static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes) unsigned int offset, void *p_data, unsigned int bytes)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ring_id;
u32 ring_base;
ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
/**
* Read HW reg in following case
* a. the offset isn't a ring mmio
* b. the offset's ring is running on hw.
* c. the offset is ring time stamp mmio
*/
if (ring_id >= 0)
ring_base = dev_priv->engine[ring_id]->mmio_base;
if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
mmio_hw_access_pre(dev_priv);
vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
mmio_hw_access_post(dev_priv);
}
mmio_hw_access_pre(dev_priv);
vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
mmio_hw_access_post(dev_priv);
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
} }
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset); int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist; struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data; u32 data = *(u32 *)p_data;
int ret = 0; int ret = 0;
...@@ -1436,7 +1463,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, ...@@ -1436,7 +1463,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes) void *p_data, unsigned int bytes)
{ {
u32 data = *(u32 *)p_data; u32 data = *(u32 *)p_data;
int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset); int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
bool enable_execlist; bool enable_execlist;
write_vreg(vgpu, offset, p_data, bytes); write_vreg(vgpu, offset, p_data, bytes);
......
...@@ -65,6 +65,8 @@ struct intel_gvt_mmio_info { ...@@ -65,6 +65,8 @@ struct intel_gvt_mmio_info {
struct hlist_node node; struct hlist_node node;
}; };
int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt); unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device); bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
......
...@@ -131,6 +131,20 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req) ...@@ -131,6 +131,20 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req)
return i915_gem_context_force_single_submission(req->ctx); return i915_gem_context_force_single_submission(req->ctx);
} }
/*
 * Snapshot a ring's INSTDONE/ACTHD/ACTHD_UDW hardware registers into the
 * vGPU's virtual register file, so the guest can still read sane values
 * after the vGPU context is scheduled out (or preempted).
 *
 * NOTE: the local must be named dev_priv — I915_READ_FW() is a macro
 * that references it implicitly.
 */
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u32 base = dev_priv->engine[ring_id]->mmio_base;
	i915_reg_t instdone = RING_INSTDONE(base);
	i915_reg_t acthd = RING_ACTHD(base);
	i915_reg_t acthd_udw = RING_ACTHD_UDW(base);

	vgpu_vreg(vgpu, i915_mmio_reg_offset(instdone)) = I915_READ_FW(instdone);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(acthd)) = I915_READ_FW(acthd);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(acthd_udw)) = I915_READ_FW(acthd_udw);
}
static int shadow_context_status_change(struct notifier_block *nb, static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data) unsigned long action, void *data)
{ {
...@@ -175,9 +189,12 @@ static int shadow_context_status_change(struct notifier_block *nb, ...@@ -175,9 +189,12 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1); atomic_set(&workload->shadow_ctx_active, 1);
break; break;
case INTEL_CONTEXT_SCHEDULE_OUT: case INTEL_CONTEXT_SCHEDULE_OUT:
case INTEL_CONTEXT_SCHEDULE_PREEMPTED: save_ring_hw_state(workload->vgpu, ring_id);
atomic_set(&workload->shadow_ctx_active, 0); atomic_set(&workload->shadow_ctx_active, 0);
break; break;
case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
save_ring_hw_state(workload->vgpu, ring_id);
break;
default: default:
WARN_ON(1); WARN_ON(1);
return NOTIFY_OK; return NOTIFY_OK;
...@@ -740,6 +757,9 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu) ...@@ -740,6 +757,9 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
if (IS_ERR(vgpu->shadow_ctx)) if (IS_ERR(vgpu->shadow_ctx))
return PTR_ERR(vgpu->shadow_ctx); return PTR_ERR(vgpu->shadow_ctx);
if (INTEL_INFO(vgpu->gvt->dev_priv)->has_logical_ring_preemption)
vgpu->shadow_ctx->priority = INT_MAX;
vgpu->shadow_ctx->engine[RCS].initialised = true; vgpu->shadow_ctx->engine[RCS].initialised = true;
bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES); bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
......
...@@ -4712,17 +4712,19 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) ...@@ -4712,17 +4712,19 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* state. Fortunately, the kernel_context is disposable and we do * state. Fortunately, the kernel_context is disposable and we do
* not rely on its state. * not rely on its state.
*/ */
ret = i915_gem_switch_to_kernel_context(dev_priv); if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
if (ret) ret = i915_gem_switch_to_kernel_context(dev_priv);
goto err_unlock; if (ret)
goto err_unlock;
ret = i915_gem_wait_for_idle(dev_priv, ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED); I915_WAIT_LOCKED);
if (ret && ret != -EIO) if (ret && ret != -EIO)
goto err_unlock; goto err_unlock;
assert_kernel_context_is_current(dev_priv); assert_kernel_context_is_current(dev_priv);
}
i915_gem_contexts_lost(dev_priv); i915_gem_contexts_lost(dev_priv);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
...@@ -4946,8 +4948,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv) ...@@ -4946,8 +4948,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
{ {
int ret; int ret;
mutex_lock(&dev_priv->drm.struct_mutex);
/* /*
* We need to fallback to 4K pages since gvt gtt handling doesn't * We need to fallback to 4K pages since gvt gtt handling doesn't
* support huge page entries - we will need to check either hypervisor * support huge page entries - we will need to check either hypervisor
...@@ -4967,18 +4967,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv) ...@@ -4967,18 +4967,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
} }
ret = i915_gem_init_userptr(dev_priv);
if (ret)
return ret;
/* This is just a security blanket to placate dragons. /* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs * On some systems, we very sporadically observe that the first TLBs
* used by the CS may be stale, despite us poking the TLB reset. If * used by the CS may be stale, despite us poking the TLB reset. If
* we hold the forcewake during initialisation these problems * we hold the forcewake during initialisation these problems
* just magically go away. * just magically go away.
*/ */
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ret = i915_gem_init_userptr(dev_priv);
if (ret)
goto out_unlock;
ret = i915_gem_init_ggtt(dev_priv); ret = i915_gem_init_ggtt(dev_priv);
if (ret) if (ret)
goto out_unlock; goto out_unlock;
......
...@@ -2951,9 +2951,6 @@ enum i915_power_well_id { ...@@ -2951,9 +2951,6 @@ enum i915_power_well_id {
#define ILK_DPFC_CHICKEN _MMIO(0x43224) #define ILK_DPFC_CHICKEN _MMIO(0x43224)
#define ILK_DPFC_DISABLE_DUMMY0 (1<<8) #define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23) #define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
#define GLK_SKIP_SEG_EN (1<<12)
#define GLK_SKIP_SEG_COUNT_MASK (3<<10)
#define GLK_SKIP_SEG_COUNT(x) ((x)<<10)
#define ILK_FBC_RT_BASE _MMIO(0x2128) #define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1<<0) #define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1) #define SNB_FBC_FRONT_BUFFER (1<<1)
......
...@@ -2131,6 +2131,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, ...@@ -2131,6 +2131,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (IS_CANNONLAKE(dev_priv)) { if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */ /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0); val = I915_READ(DPCLKA_CFGCR0);
val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port); val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port);
I915_WRITE(DPCLKA_CFGCR0, val); I915_WRITE(DPCLKA_CFGCR0, val);
......
...@@ -1000,7 +1000,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, ...@@ -1000,7 +1000,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
return crtc->config->cpu_transcoder; return crtc->config->cpu_transcoder;
} }
static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe) static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
enum pipe pipe)
{ {
i915_reg_t reg = PIPEDSL(pipe); i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2; u32 line1, line2;
...@@ -1015,7 +1016,28 @@ static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe) ...@@ -1015,7 +1016,28 @@ static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
msleep(5); msleep(5);
line2 = I915_READ(reg) & line_mask; line2 = I915_READ(reg) & line_mask;
return line1 == line2; return line1 != line2;
}
/*
 * Poll the pipe's scanline counter (via pipe_scanline_is_moving()) until it
 * matches @state: true = wait for the scanline to start moving, false = wait
 * for it to stop.  Logs an error on timeout instead of propagating it —
 * callers treat this as best-effort.  The wait_for() bound is 100
 * (presumably milliseconds per the wait_for() macro — confirm against its
 * definition).
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/* Wait for the display line to settle/start moving */
if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
DRM_ERROR("pipe %c scanline %s wait timed out\n",
pipe_name(pipe), onoff(state));
}
/* Block (best-effort, bounded) until the pipe's scanline counter has stopped. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
wait_for_pipe_scanline_moving(crtc, false);
}
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
wait_for_pipe_scanline_moving(crtc, true);
} }
/* /*
...@@ -1038,7 +1060,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) ...@@ -1038,7 +1060,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{ {
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
if (INTEL_GEN(dev_priv) >= 4) { if (INTEL_GEN(dev_priv) >= 4) {
i915_reg_t reg = PIPECONF(cpu_transcoder); i915_reg_t reg = PIPECONF(cpu_transcoder);
...@@ -1049,9 +1070,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) ...@@ -1049,9 +1070,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
100)) 100))
WARN(1, "pipe_off wait timed out\n"); WARN(1, "pipe_off wait timed out\n");
} else { } else {
/* Wait for the display line to settle */ intel_wait_for_pipe_scanline_stopped(crtc);
if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
WARN(1, "pipe_off wait timed out\n");
} }
} }
...@@ -1936,15 +1955,14 @@ static void intel_enable_pipe(struct intel_crtc *crtc) ...@@ -1936,15 +1955,14 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
POSTING_READ(reg); POSTING_READ(reg);
/* /*
* Until the pipe starts DSL will read as 0, which would cause * Until the pipe starts PIPEDSL reads will return a stale value,
* an apparent vblank timestamp jump, which messes up also the * which causes an apparent vblank timestamp jump when PIPEDSL
* frame count when it's derived from the timestamps. So let's * resets to its proper value. That also messes up the frame count
* wait for the pipe to start properly before we call * when it's derived from the timestamps. So let's wait for the
* drm_crtc_vblank_on() * pipe to start properly before we call drm_crtc_vblank_on()
*/ */
if (dev->max_vblank_count == 0 && if (dev->max_vblank_count == 0)
wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50)) intel_wait_for_pipe_scanline_moving(crtc);
DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
} }
/** /**
...@@ -14643,6 +14661,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) ...@@ -14643,6 +14661,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{ {
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
pipe_name(pipe)); pipe_name(pipe));
...@@ -14652,8 +14672,7 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) ...@@ -14652,8 +14672,7 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
I915_WRITE(PIPECONF(pipe), 0); I915_WRITE(PIPECONF(pipe), 0);
POSTING_READ(PIPECONF(pipe)); POSTING_READ(PIPECONF(pipe));
if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100)) intel_wait_for_pipe_scanline_stopped(crtc);
DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe));
I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
POSTING_READ(DPLL(pipe)); POSTING_READ(DPLL(pipe));
......
...@@ -124,7 +124,6 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) ...@@ -124,7 +124,6 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
static void glk_init_clock_gating(struct drm_i915_private *dev_priv) static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{ {
u32 val;
gen9_init_clock_gating(dev_priv); gen9_init_clock_gating(dev_priv);
/* /*
...@@ -144,11 +143,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv) ...@@ -144,11 +143,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(CHICKEN_MISC_2, val); I915_WRITE(CHICKEN_MISC_2, val);
} }
/* Display WA #1133: WaFbcSkipSegments:glk */
val = I915_READ(ILK_DPFC_CHICKEN);
val &= ~GLK_SKIP_SEG_COUNT_MASK;
val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
I915_WRITE(ILK_DPFC_CHICKEN, val);
} }
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv) static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
...@@ -8517,7 +8511,6 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) ...@@ -8517,7 +8511,6 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
static void cnl_init_clock_gating(struct drm_i915_private *dev_priv) static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
{ {
u32 val;
cnp_init_clock_gating(dev_priv); cnp_init_clock_gating(dev_priv);
/* This is not an Wa. Enable for better image quality */ /* This is not an Wa. Enable for better image quality */
...@@ -8537,12 +8530,6 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv) ...@@ -8537,12 +8530,6 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
I915_READ(SLICE_UNIT_LEVEL_CLKGATE) | I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
SARBUNIT_CLKGATE_DIS); SARBUNIT_CLKGATE_DIS);
/* Display WA #1133: WaFbcSkipSegments:cnl */
val = I915_READ(ILK_DPFC_CHICKEN);
val &= ~GLK_SKIP_SEG_COUNT_MASK;
val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
I915_WRITE(ILK_DPFC_CHICKEN, val);
} }
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment