Commit 041df357 authored by Dave Airlie

Merge tag 'drm-intel-next-2014-10-24' of git://anongit.freedesktop.org/drm-intel into drm-next

- suspend/resume/freeze/thaw unification from Imre
- wa list improvements from Mika&Arun
- display pll precomputation from Ander Conselvan, this removed the last
  ->mode_set callbacks, a big step towards implementing atomic modesets
- more kerneldoc for the interrupt code
- 180 rotation for cursors (Ville&Sonika)
- ULT/ULX feature check macros cleaned up thanks to Damien
- piles and piles of fixes all over, bug team seems to work!

* tag 'drm-intel-next-2014-10-24' of git://anongit.freedesktop.org/drm-intel: (61 commits)
  drm/i915: Update DRIVER_DATE to 20141024
  drm/i915: add comments on what stage a given PM handler is called
  drm/i915: unify switcheroo and legacy suspend/resume handlers
  drm/i915: add poweroff_late handler
  drm/i915: sanitize suspend/resume helper function names
  drm/i915: unify S3 and S4 suspend/resume handlers
  drm/i915: disable/re-enable PCI device around S4 freeze/thaw
  drm/i915: enable output polling during S4 thaw
  drm/i915: check for GT faults in all resume handlers and driver load time
  drm/i915: remove unused restore_gtt_mappings optimization during suspend
  drm/i915: fix S4 suspend while switcheroo state is off
  drm/i915: vlv: fix switcheroo/legacy suspend/resume
  drm/i915: propagate error from legacy resume handler
  drm/i915: unify legacy S3 suspend and S4 freeze handlers
  drm/i915: factor out i915_drm_suspend_late
  drm/i915: Emit even number of dwords when emitting LRIs
  drm/i915: Add rotation support for cursor plane (v5)
  drm/i915: Correctly reject invalid flags for wait_ioctl
  drm/i915: use macros to assign mmio access functions
  drm/i915: only run hsw_power_well_post_enable when really needed
  ...
parents bbf0ef03 3eebaec6
......@@ -3829,6 +3829,11 @@ int num_ioctls;</synopsis>
!Idrivers/gpu/drm/i915/intel_frontbuffer.c
!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
</sect2>
<sect2>
<title>Display FIFO Underrun Reporting</title>
!Pdrivers/gpu/drm/i915/intel_fifo_underrun.c fifo underrun handling
!Idrivers/gpu/drm/i915/intel_fifo_underrun.c
</sect2>
<sect2>
<title>Plane Configuration</title>
......
......@@ -45,6 +45,7 @@ i915-y += intel_renderstate_gen6.o \
# modesetting core code
i915-y += intel_bios.o \
intel_display.o \
intel_fifo_underrun.o \
intel_frontbuffer.o \
intel_modes.o \
intel_overlay.o \
......
......@@ -1848,6 +1848,8 @@ static int i915_execlists(struct seq_file *m, void *data)
if (ret)
return ret;
intel_runtime_pm_get(dev_priv);
for_each_ring(ring, dev_priv, ring_id) {
struct intel_ctx_submit_request *head_req = NULL;
int count = 0;
......@@ -1899,6 +1901,7 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_putc(m, '\n');
}
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
......@@ -2655,18 +2658,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
for (i = 0; i < dev_priv->num_wa_regs; ++i) {
u32 addr, mask;
seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
for (i = 0; i < dev_priv->workarounds.count; ++i) {
u32 addr, mask, value, read;
bool ok;
addr = dev_priv->intel_wa_regs[i].addr;
mask = dev_priv->intel_wa_regs[i].mask;
dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
if (dev_priv->intel_wa_regs[i].addr)
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
dev_priv->intel_wa_regs[i].addr,
dev_priv->intel_wa_regs[i].value,
dev_priv->intel_wa_regs[i].mask);
addr = dev_priv->workarounds.reg[i].addr;
mask = dev_priv->workarounds.reg[i].mask;
value = dev_priv->workarounds.reg[i].value;
read = I915_READ(addr);
ok = (value & mask) == (read & mask);
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
addr, value, mask, read, ok ? "OK" : "FAIL");
}
intel_runtime_pm_put(dev_priv);
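As a worked illustration of the verification the rewritten loop above performs (values hypothetical): suppose a workaround set bit 0 of a masked register, so the table stores value = 0x00010001 and mask = 0x0001. Then (value & mask) == 0x1, and if the readback also has bit 0 set the row prints status: OK; a FAIL flags a register that has lost its workaround since it was emitted, for example across a GPU reset.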
......@@ -3255,6 +3258,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
pipe));
u32 val = 0; /* shut up gcc */
int ret;
......@@ -3290,6 +3295,14 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
if (!pipe_crc->entries)
return -ENOMEM;
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
* user space can't make reliable use of the CRCs, so let's just
* completely disable it.
*/
hsw_disable_ips(crtc);
spin_lock_irq(&pipe_crc->lock);
pipe_crc->head = 0;
pipe_crc->tail = 0;
......@@ -3328,6 +3341,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
vlv_undo_pipe_scramble_reset(dev, pipe);
else if (IS_HASWELL(dev) && pipe == PIPE_A)
hsw_undo_trans_edp_pipe_A_crc_wa(dev);
hsw_enable_ips(crtc);
}
return 0;
......
......@@ -1275,12 +1275,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
i915_resume_legacy(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
pr_err("switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend(dev, pmm);
i915_suspend_legacy(dev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
......@@ -1853,8 +1853,12 @@ int i915_driver_unload(struct drm_device *dev)
acpi_video_unregister();
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_fbdev_fini(dev);
drm_vblank_cleanup(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_modeset_cleanup(dev);
/*
......@@ -1895,8 +1899,6 @@ int i915_driver_unload(struct drm_device *dev)
i915_free_hws(dev);
}
drm_vblank_cleanup(dev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
......
......@@ -463,7 +463,7 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(IS_ULT(dev));
WARN_ON(IS_HSW_ULT(dev));
} else if (IS_BROADWELL(dev)) {
dev_priv->pch_type = PCH_LPT;
dev_priv->pch_id =
......@@ -474,17 +474,15 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
WARN_ON(!IS_HSW_ULT(dev));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev));
WARN_ON(IS_ULT(dev));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev));
WARN_ON(!IS_ULT(dev));
} else
continue;
......@@ -556,7 +554,7 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int intel_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume);
static int i915_drm_freeze(struct drm_device *dev)
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
......@@ -632,7 +630,26 @@ static int i915_drm_freeze(struct drm_device *dev)
return 0;
}
int i915_suspend(struct drm_device *dev, pm_message_t state)
static int i915_drm_suspend_late(struct drm_device *drm_dev)
{
struct drm_i915_private *dev_priv = drm_dev->dev_private;
int ret;
ret = intel_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret);
return ret;
}
pci_disable_device(drm_dev->pdev);
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
return 0;
}
int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
{
int error;
......@@ -642,48 +659,25 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
return -ENODEV;
}
if (state.event == PM_EVENT_PRETHAW)
return 0;
if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
state.event != PM_EVENT_FREEZE))
return -EINVAL;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
error = i915_drm_freeze(dev);
error = i915_drm_suspend(dev);
if (error)
return error;
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
return 0;
return i915_drm_suspend_late(dev);
}
static int i915_drm_thaw_early(struct drm_device *dev)
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ret = intel_resume_prepare(dev_priv, false);
if (ret)
DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
intel_uncore_early_sanitize(dev, true);
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
return ret;
}
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
restore_gtt_mappings) {
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
......@@ -742,21 +736,15 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_opregion_notify_adapter(dev, PCI_D0);
return 0;
}
static int i915_drm_thaw(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_check_and_clear_faults(dev);
drm_kms_helper_poll_enable(dev);
return __i915_drm_thaw(dev, true);
return 0;
}
static int i915_resume_early(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/*
* We have a resume ordering issue with the snd-hda driver also
......@@ -772,33 +760,29 @@ static int i915_resume_early(struct drm_device *dev)
pci_set_master(dev->pdev);
return i915_drm_thaw_early(dev);
}
ret = intel_resume_prepare(dev_priv, false);
if (ret)
DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
int i915_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
intel_uncore_early_sanitize(dev, true);
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
* earlier) need to restore the GTT mappings since the BIOS might clear
* all our scratch PTEs.
*/
ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
if (ret)
return ret;
drm_kms_helper_poll_enable(dev);
return 0;
}
static int i915_resume_legacy(struct drm_device *dev)
int i915_resume_legacy(struct drm_device *dev)
{
i915_resume_early(dev);
i915_resume(dev);
int ret;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
ret = i915_drm_resume_early(dev);
if (ret)
return ret;
return i915_drm_resume(dev);
}
/**
......@@ -950,15 +934,13 @@ static int i915_pm_suspend(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
return i915_drm_freeze(drm_dev);
return i915_drm_suspend(drm_dev);
}
static int i915_pm_suspend_late(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = drm_dev->dev_private;
int ret;
/*
* We have a suspend ordering issue with the snd-hda driver also
......@@ -972,16 +954,7 @@ static int i915_pm_suspend_late(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
ret = intel_suspend_complete(dev_priv);
if (ret)
DRM_ERROR("Suspend complete failed: %d\n", ret);
else {
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
}
return ret;
return i915_drm_suspend_late(drm_dev);
}
static int i915_pm_resume_early(struct device *dev)
......@@ -989,52 +962,21 @@ static int i915_pm_resume_early(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return i915_resume_early(drm_dev);
}
static int i915_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return i915_resume(drm_dev);
}
static int i915_pm_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
if (!drm_dev || !drm_dev->dev_private) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
return i915_drm_freeze(drm_dev);
}
static int i915_pm_thaw_early(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
return i915_drm_thaw_early(drm_dev);
return i915_drm_resume_early(drm_dev);
}
static int i915_pm_thaw(struct device *dev)
static int i915_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return i915_drm_thaw(drm_dev);
}
static int i915_pm_poweroff(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
return i915_drm_freeze(drm_dev);
return i915_drm_resume(drm_dev);
}
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
......@@ -1592,16 +1534,40 @@ static int intel_resume_prepare(struct drm_i915_private *dev_priv,
}
static const struct dev_pm_ops i915_pm_ops = {
/*
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
* PMSG_RESUME]
*/
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
.freeze = i915_pm_freeze,
.thaw_early = i915_pm_thaw_early,
.thaw = i915_pm_thaw,
.poweroff = i915_pm_poweroff,
/*
* S4 event handlers
* @freeze, @freeze_late : called (1) before creating the
* hibernation image [PMSG_FREEZE] and
* (2) after rebooting, before restoring
* the image [PMSG_QUIESCE]
* @thaw, @thaw_early : called (1) after creating the hibernation
* image, before writing it [PMSG_THAW]
* and (2) after failing to create or
* restore the image [PMSG_RECOVER]
* @poweroff, @poweroff_late: called after writing the hibernation
* image, before rebooting [PMSG_HIBERNATE]
* @restore, @restore_early : called after rebooting and restoring the
* hibernation image [PMSG_RESTORE]
*/
.freeze = i915_pm_suspend,
.freeze_late = i915_pm_suspend_late,
.thaw_early = i915_pm_resume_early,
.thaw = i915_pm_resume,
.poweroff = i915_pm_suspend,
.poweroff_late = i915_pm_suspend_late,
.restore_early = i915_pm_resume_early,
.restore = i915_pm_resume,
/* S0ix (via runtime suspend) event handlers */
.runtime_suspend = intel_runtime_suspend,
.runtime_resume = intel_runtime_resume,
};
......@@ -1643,7 +1609,7 @@ static struct drm_driver driver = {
.set_busid = drm_pci_set_busid,
/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
.suspend = i915_suspend,
.suspend = i915_suspend_legacy,
.resume = i915_resume_legacy,
.device_is_agp = i915_driver_device_is_agp,
......
......@@ -55,7 +55,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20141003"
#define DRIVER_DATE "20141024"
enum pipe {
INVALID_PIPE = -1,
......@@ -460,7 +460,7 @@ struct drm_i915_display_funcs {
* Returns true on success, false on failure.
*/
bool (*find_dpll)(const struct intel_limit *limit,
struct drm_crtc *crtc,
struct intel_crtc *crtc,
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
......@@ -476,7 +476,7 @@ struct drm_i915_display_funcs {
struct intel_crtc_config *);
void (*get_plane_config)(struct intel_crtc *,
struct intel_plane_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int (*crtc_mode_set)(struct intel_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
void (*crtc_enable)(struct drm_crtc *crtc);
......@@ -1448,6 +1448,20 @@ struct i915_frontbuffer_tracking {
unsigned flip_bits;
};
struct i915_wa_reg {
u32 addr;
u32 value;
/* bitmask representing WA bits */
u32 mask;
};
#define I915_MAX_WA_REGS 16
struct i915_workarounds {
struct i915_wa_reg reg[I915_MAX_WA_REGS];
u32 count;
};
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
......@@ -1527,6 +1541,8 @@ struct drm_i915_private {
struct intel_opregion opregion;
struct intel_vbt_data vbt;
bool preserve_bios_swizzle;
/* overlay */
struct intel_overlay *overlay;
......@@ -1590,19 +1606,7 @@ struct drm_i915_private {
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
/*
* workarounds are currently applied at different places and
* changes are being done to consolidate them so exact count is
* not clear at this point, use a max value for now.
*/
#define I915_MAX_WA_REGS 16
struct {
u32 addr;
u32 value;
/* bitmask representing WA bits */
u32 mask;
} intel_wa_regs[I915_MAX_WA_REGS];
u32 num_wa_regs;
struct i915_workarounds workarounds;
/* Reclocking support */
bool render_reclock_avail;
......@@ -2107,7 +2111,6 @@ struct drm_i915_cmd_table {
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
......@@ -2141,7 +2144,7 @@ struct drm_i915_cmd_table {
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
to_i915(dev)->ellc_size)
__I915__(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
......@@ -2178,13 +2181,15 @@ struct drm_i915_cmd_table {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
......@@ -2195,7 +2200,7 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
......@@ -2216,8 +2221,8 @@ struct drm_i915_cmd_table {
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
extern int i915_resume_legacy(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
......@@ -2312,6 +2317,17 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask);
#define ibx_enable_display_interrupt(dev_priv, bits) \
ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
ibx_display_interrupt_update((dev_priv), (bits), 0)
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
......
......@@ -1466,6 +1466,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
*
* IMPORTANT:
*
* DRM driver writers who look at this function as an example for how to do GEM
* mmap support, please don't implement mmap support like here. The modern way
* to implement DRM mmap support is with an mmap offset ioctl (like
* i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
* That way debug tooling like valgrind will understand what's going on; hiding
* the mmap call in a driver private ioctl will break that. The i915 driver only
* does cpu mmaps this way because we didn't know better.
*/
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
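To make the recommendation in the IMPORTANT note above concrete, here is a hedged userspace sketch of the mmap-offset flow it describes, using only the generic dumb-buffer uapi (the helper name map_dumb_bo is ours for illustration; error handling omitted):

/* Userspace sketch: allocate a dumb buffer and map it through the DRM fd
 * itself, so debug tooling like valgrind can see the mapping. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

static void *map_dumb_bo(int drm_fd, uint32_t width, uint32_t height)
{
	struct drm_mode_create_dumb create = {
		.width = width,
		.height = height,
		.bpp = 32,
	};
	struct drm_mode_map_dumb map = {0};

	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
	map.handle = create.handle;
	/* The driver hands back a fake offset into the DRM fd... */
	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
	/* ...and the actual mapping is a plain mmap(2) on that fd. */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, drm_fd, map.offset);
}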
......@@ -2800,6 +2810,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u32 seqno = 0;
int ret = 0;
if (args->flags != 0)
return -EINVAL;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
......@@ -5259,7 +5272,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
unsigned long timeout = msecs_to_jiffies(5000) + 1;
unsigned long pinned, bound, unbound, freed;
unsigned long pinned, bound, unbound, freed_pages;
bool was_interruptible;
bool unlock;
......@@ -5276,7 +5289,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
freed = i915_gem_shrink_all(dev_priv);
freed_pages = i915_gem_shrink_all(dev_priv);
dev_priv->mm.interruptible = was_interruptible;
......@@ -5307,14 +5320,15 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
if (unlock)
mutex_unlock(&dev->struct_mutex);
if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
freed, pinned);
freed_pages << PAGE_SHIFT, pinned);
if (unbound || bound)
pr_err("%lu and %lu bytes still available in the "
"bound and unbound GPU page lists.\n",
bound, unbound);
*(unsigned long *)ptr += freed;
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
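A quick worked check of the unit fix above, assuming the usual 4 KiB pages (PAGE_SHIFT = 12): i915_gem_shrink_all() returns a page count, so freed_pages = 100 now reports 100 << 12 = 409600 bytes freed, where the old code printed the raw page count as if it were bytes.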
......
......@@ -102,16 +102,26 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (INTEL_INFO(dev)->gen >= 6) {
if (dev_priv->preserve_bios_swizzle) {
if (I915_READ(DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else {
uint32_t dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
/* Enable swizzling when the channels are populated with
* identically sized dimms. We don't need to check the 3rd
* channel because no cpu with gpu attached ships in that
* configuration. Also, swizzling only makes sense for 2
* channels anyway. */
/* Enable swizzling when the channels are populated
* with identically sized dimms. We don't need to check
* the 3rd channel because no cpu with gpu attached
* ships in that configuration. Also, swizzling only
* makes sense for 2 channels anyway. */
if (dimm_c0 == dimm_c1) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
......@@ -119,6 +129,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
} else if (IS_GEN5(dev)) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
......
......@@ -189,7 +189,6 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_ALLOC] = compat_i915_alloc
};
#ifdef CONFIG_COMPAT
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
......@@ -218,4 +217,3 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return ret;
}
#endif
......@@ -883,8 +883,8 @@ enum punit_power_well {
#define _VLV_PCS23_DW11_CH0 0x042c
#define _VLV_PCS01_DW11_CH1 0x262c
#define _VLV_PCS23_DW11_CH1 0x282c
#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1)
#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1)
#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
......@@ -4054,17 +4054,18 @@ enum punit_power_well {
#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
/* drain latency register values */
#define DRAIN_LATENCY_PRECISION_16 16
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_64 64
#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
#define DDL_CURSOR_PRECISION_64 (1<<31)
#define DDL_CURSOR_PRECISION_32 (0<<31)
#define DDL_CURSOR_PRECISION_HIGH (1<<31)
#define DDL_CURSOR_PRECISION_LOW (0<<31)
#define DDL_CURSOR_SHIFT 24
#define DDL_SPRITE_PRECISION_64(sprite) (1<<(15+8*(sprite)))
#define DDL_SPRITE_PRECISION_32(sprite) (0<<(15+8*(sprite)))
#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite)))
#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite)))
#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
#define DDL_PLANE_PRECISION_64 (1<<7)
#define DDL_PLANE_PRECISION_32 (0<<7)
#define DDL_PLANE_PRECISION_HIGH (1<<7)
#define DDL_PLANE_PRECISION_LOW (0<<7)
#define DDL_PLANE_SHIFT 0
#define DRAIN_LATENCY_MASK 0x7f
......@@ -4207,6 +4208,7 @@ enum punit_power_well {
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define CURSOR_ROTATE_180 (1<<15)
#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
......@@ -4579,6 +4581,9 @@ enum punit_power_well {
#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
#define PLANE_CTL_ROTATE_MASK 0x3
#define PLANE_CTL_ROTATE_0 0x0
#define PLANE_CTL_ROTATE_180 0x2
#define _PLANE_STRIDE_1_A 0x70188
#define _PLANE_STRIDE_2_A 0x70288
#define _PLANE_STRIDE_3_A 0x70388
......
......@@ -139,8 +139,6 @@ static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static struct attribute *rc6_attrs[] = {
&dev_attr_rc6_enable.attr,
&dev_attr_rc6_residency_ms.attr,
&dev_attr_rc6p_residency_ms.attr,
&dev_attr_rc6pp_residency_ms.attr,
NULL
};
......@@ -148,6 +146,17 @@ static struct attribute_group rc6_attr_group = {
.name = power_group_name,
.attrs = rc6_attrs
};
static struct attribute *rc6p_attrs[] = {
&dev_attr_rc6p_residency_ms.attr,
&dev_attr_rc6pp_residency_ms.attr,
NULL
};
static struct attribute_group rc6p_attr_group = {
.name = power_group_name,
.attrs = rc6p_attrs
};
#endif
static int l3_access_valid(struct drm_device *dev, loff_t offset)
......@@ -595,12 +604,18 @@ void i915_setup_sysfs(struct drm_device *dev)
int ret;
#ifdef CONFIG_PM
if (INTEL_INFO(dev)->gen >= 6) {
if (HAS_RC6(dev)) {
ret = sysfs_merge_group(&dev->primary->kdev->kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
if (HAS_RC6p(dev)) {
ret = sysfs_merge_group(&dev->primary->kdev->kobj,
&rc6p_attr_group);
if (ret)
DRM_ERROR("RC6p residency sysfs setup failed\n");
}
#endif
if (HAS_L3_DPF(dev)) {
ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
......@@ -640,5 +655,6 @@ void i915_teardown_sysfs(struct drm_device *dev)
device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}
......@@ -775,7 +775,7 @@ static void intel_crt_reset(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
POSTING_READ(crt->adpa_reg);
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
}
......
......@@ -1291,7 +1291,7 @@ static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
else if (IS_ULT(dev))
else if (IS_HSW_ULT(dev))
return 337500;
else
return 540000;
......
......@@ -278,7 +278,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
}
static enum drm_connector_status
intel_mst_port_dp_detect(struct drm_connector *connector)
intel_dp_mst_detect(struct drm_connector *connector, bool force)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
......@@ -286,14 +286,6 @@ intel_mst_port_dp_detect(struct drm_connector *connector)
return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
}
static enum drm_connector_status
intel_dp_mst_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
status = intel_mst_port_dp_detect(connector);
return status;
}
static int
intel_dp_mst_set_property(struct drm_connector *connector,
struct drm_property *property,
......
......@@ -755,12 +755,19 @@ static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
}
/* i915_irq.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
/* intel_fifo_underrun.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder,
bool enable);
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder);
void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
......@@ -779,7 +786,6 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
}
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void i9xx_check_fifo_underruns(struct drm_device *dev);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
/* intel_crt.c */
......
......@@ -775,7 +775,8 @@ static void bdw_enable_backlight(struct intel_connector *connector)
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
/* BDW always uses the pch pwm controls. */
/* After LPT, override is the default. */
if (HAS_PCH_LPT(dev_priv))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
......
......@@ -1345,6 +1345,7 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
int *prec_mult,
int *drain_latency)
{
struct drm_device *dev = crtc->dev;
int entries;
int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
......@@ -1355,6 +1356,10 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
return false;
entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
if (IS_CHERRYVIEW(dev))
*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
DRAIN_LATENCY_PRECISION_16;
else
*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
DRAIN_LATENCY_PRECISION_32;
*drain_latency = (64 * (*prec_mult) * 4) / entries;
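A worked example of the computation above, with illustrative numbers: for a 148500 kHz pixel clock at 4 bytes per pixel, entries = DIV_ROUND_UP(148500, 1000) * 4 = 596. That exceeds 128, so Valleyview selects DRAIN_LATENCY_PRECISION_64 and gets drain_latency = (64 * 64 * 4) / 596 = 27, while Cherryview's high precision of 32 yields (64 * 32 * 4) / 596 = 13.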
......@@ -1375,15 +1380,18 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pixel_size;
int drain_latency;
enum pipe pipe = intel_crtc->pipe;
int plane_prec, prec_mult, plane_dl;
const int high_precision = IS_CHERRYVIEW(dev) ?
DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 |
DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 |
plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
(DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
if (!intel_crtc_active(crtc)) {
......@@ -1394,9 +1402,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
/* Primary plane Drain Latency */
pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
DDL_PLANE_PRECISION_64 :
DDL_PLANE_PRECISION_32;
plane_prec = (prec_mult == high_precision) ?
DDL_PLANE_PRECISION_HIGH :
DDL_PLANE_PRECISION_LOW;
plane_dl |= plane_prec | drain_latency;
}
......@@ -1408,9 +1416,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
/* Program cursor DL only if it is enabled */
if (intel_crtc->cursor_base &&
vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
DDL_CURSOR_PRECISION_64 :
DDL_CURSOR_PRECISION_32;
plane_prec = (prec_mult == high_precision) ?
DDL_CURSOR_PRECISION_HIGH :
DDL_CURSOR_PRECISION_LOW;
plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
}
......@@ -1578,15 +1586,17 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane,
int plane_prec;
int sprite_dl;
int prec_mult;
const int high_precision = IS_CHERRYVIEW(dev) ?
DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) |
sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
(DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
&drain_latency)) {
plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
DDL_SPRITE_PRECISION_64(sprite) :
DDL_SPRITE_PRECISION_32(sprite);
plane_prec = (prec_mult == high_precision) ?
DDL_SPRITE_PRECISION_HIGH(sprite) :
DDL_SPRITE_PRECISION_LOW(sprite);
sprite_dl |= plane_prec |
(drain_latency << DDL_SPRITE_SHIFT(sprite));
}
......@@ -3629,10 +3639,15 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
else
mode = 0;
}
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
if (HAS_RC6p(dev))
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
else
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
}
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
......@@ -3649,7 +3664,7 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
if (enable_rc6 >= 0) {
int mask;
if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
if (HAS_RC6p(dev))
mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
INTEL_RC6pp_ENABLE;
else
......@@ -5649,16 +5664,6 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
/* FIXME(BDW): Check all the w/a, some might only apply to
* pre-production hw. */
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
......
......@@ -665,80 +665,108 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
return ret;
}
static inline void intel_ring_emit_wa(struct intel_engine_cs *ring,
u32 addr, u32 value)
static int intel_ring_workarounds_emit(struct intel_engine_cs *ring)
{
int ret, i;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS))
return;
if (WARN_ON(w->count == 0))
return 0;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, addr);
intel_ring_emit(ring, value);
ring->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(ring);
if (ret)
return ret;
dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr;
dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF;
/* value is updated with the status of remaining bits of this
* register when it is read from debugfs file
*/
dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value;
dev_priv->num_wa_regs++;
ret = intel_ring_begin(ring, (w->count * 2 + 2));
if (ret)
return ret;
return;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
intel_ring_emit(ring, w->reg[i].addr);
intel_ring_emit(ring, w->reg[i].value);
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
ring->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(ring);
if (ret)
return ret;
DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
return 0;
}
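Note how the dword budget in the helper above stays even by construction: one MI_LOAD_REGISTER_IMM header, w->count address/value pairs, and a trailing MI_NOOP come to 2 * count + 2 dwords, e.g. 1 + 10 + 1 = 12 for five workarounds. That is the point of the "Emit even number of dwords when emitting LRIs" patch in the shortlog, since ring emission has to end on a qword boundary.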
static int wa_add(struct drm_i915_private *dev_priv,
const u32 addr, const u32 val, const u32 mask)
{
const u32 idx = dev_priv->workarounds.count;
if (WARN_ON(idx >= I915_MAX_WA_REGS))
return -ENOSPC;
dev_priv->workarounds.reg[idx].addr = addr;
dev_priv->workarounds.reg[idx].value = val;
dev_priv->workarounds.reg[idx].mask = mask;
dev_priv->workarounds.count++;
return 0;
}
#define WA_REG(addr, val, mask) { \
const int r = wa_add(dev_priv, (addr), (val), (mask)); \
if (r) \
return r; \
}
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, _MASKED_BIT_ENABLE(mask), (mask) & 0xffff)
#define WA_CLR_BIT_MASKED(addr, mask) \
WA_REG(addr, _MASKED_BIT_DISABLE(mask), (mask) & 0xffff)
#define WA_SET_BIT(addr, mask) WA_REG(addr, I915_READ(addr) | (mask), mask)
#define WA_CLR_BIT(addr, mask) WA_REG(addr, I915_READ(addr) & ~(mask), mask)
#define WA_WRITE(addr, val) WA_REG(addr, val, 0xffffffff)
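For context, a minimal sketch of the masked-register idiom that WA_SET_BIT_MASKED and WA_CLR_BIT_MASKED build on. The SKETCH_ names are ours, and the definitions are an assumption inferred from the (mask) & 0xffff handling above; the driver's real helpers are _MASKED_BIT_ENABLE and _MASKED_BIT_DISABLE:

/* Assumed sketch: a masked register latches only those low 16 bits whose
 * corresponding high 16 "write enable" bits are set, so a single write can
 * set or clear individual bits without a read-modify-write. */
#define SKETCH_MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))  /* enable bits a */
#define SKETCH_MASKED_BIT_DISABLE(a) ((a) << 16)          /* clear bits a */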
static int bdw_init_workarounds(struct intel_engine_cs *ring)
{
int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
/*
* workarounds applied in this fn are part of register state context,
* they need to be re-initialized following a gpu reset, suspend/resume, or
* module reload.
*/
dev_priv->num_wa_regs = 0;
memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
/*
* update the number of dwords required based on the
* actual number of workarounds applied
*/
ret = intel_ring_begin(ring, 18);
if (ret)
return ret;
/* WaDisablePartialInstShootdown:bdw */
/* WaDisableThreadStallDopClockGating:bdw */
/* FIXME: Unclear whether we really need this on production bdw. */
intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE
| STALL_DOP_GATING_DISABLE));
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
STALL_DOP_GATING_DISABLE);
/* WaDisableDopClockGating:bdw May not be needed for production */
intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* WaDisableDopClockGating:bdw */
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
DOP_CLOCK_GATING_DISABLE);
intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
intel_ring_emit_wa(ring, HDC_CHICKEN0,
_MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT |
(IS_BDW_GT3(dev) ?
HDC_FENCE_DEST_SLM_DISABLE : 0)
));
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
/* Wa4x4STCOptimizationDisable:bdw */
intel_ring_emit_wa(ring, CACHE_MODE_1,
_MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
WA_SET_BIT_MASKED(CACHE_MODE_1,
GEN8_4x4_STC_OPTIMIZATION_DISABLE);
/*
* BSpec recommends 8x4 when MSAA is used,
......@@ -748,52 +776,50 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
intel_ring_emit_wa(ring, GEN7_GT_MODE,
WA_SET_BIT_MASKED(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
intel_ring_advance(ring);
DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
dev_priv->num_wa_regs);
return 0;
}
static int chv_init_workarounds(struct intel_engine_cs *ring)
{
int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
/*
* workarounds applied in this fn are part of register state context,
* they need to be re-initialized following a gpu reset, suspend/resume, or
* module reload.
*/
dev_priv->num_wa_regs = 0;
memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
ret = intel_ring_begin(ring, 12);
if (ret)
return ret;
/* WaDisablePartialInstShootdown:chv */
intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* WaDisableThreadStallDopClockGating:chv */
intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
STALL_DOP_GATING_DISABLE);
/* WaDisableDopClockGating:chv (pre-production hw) */
intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
DOP_CLOCK_GATING_DISABLE);
/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
intel_ring_advance(ring);
return 0;
}
static int init_workarounds_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(ring->id != RCS);
dev_priv->workarounds.count = 0;
if (IS_BROADWELL(dev))
return bdw_init_workarounds(ring);
if (IS_CHERRYVIEW(dev))
return chv_init_workarounds(ring);
return 0;
}
......@@ -853,7 +879,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
if (HAS_L3_DPF(dev))
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
return ret;
return init_workarounds_ring(ring);
}
static void render_ring_cleanup(struct intel_engine_cs *ring)
......@@ -2299,10 +2325,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
dev_priv->semaphore_obj = obj;
}
}
if (IS_CHERRYVIEW(dev))
ring->init_context = chv_init_workarounds;
else
ring->init_context = bdw_init_workarounds;
ring->init_context = intel_ring_workarounds_emit;
ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
......
......@@ -221,9 +221,9 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
HSW_PWR_WELL_STATE_ENABLED), 20))
DRM_ERROR("Timeout enabling power well\n");
hsw_power_well_post_enable(dev_priv);
}
hsw_power_well_post_enable(dev_priv);
} else {
if (enable_requested) {
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
......
......@@ -162,6 +162,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
plane_ctl &= ~PLANE_CTL_TILED_MASK;
plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
/* Trickle feed has to be enabled */
plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
......@@ -217,6 +218,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
default:
BUG();
}
if (intel_plane->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
plane_ctl |= PLANE_CTL_ENABLE;
plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
......
......@@ -360,7 +360,8 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
static void __intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -386,6 +387,12 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
intel_uncore_forcewake_reset(dev, restore_forcewake);
}
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
__intel_uncore_early_sanitize(dev, restore_forcewake);
i915_check_and_clear_faults(dev);
}
void intel_uncore_sanitize(struct drm_device *dev)
{
/* BIOS often leaves RC6 enabled, but disable it for hw init */
......@@ -823,6 +830,22 @@ __gen4_write(64)
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
dev_priv->uncore.funcs.mmio_writew = x##_write16; \
dev_priv->uncore.funcs.mmio_writel = x##_write32; \
dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)
#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
dev_priv->uncore.funcs.mmio_readb = x##_read8; \
dev_priv->uncore.funcs.mmio_readw = x##_read16; \
dev_priv->uncore.funcs.mmio_readl = x##_read32; \
dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
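As a concrete expansion of the token pasting above, ASSIGN_WRITE_MMIO_VFUNCS(hsw) resolves x##_write8 and friends to the Haswell accessors:

dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
dev_priv->uncore.funcs.mmio_writew = hsw_write16;
dev_priv->uncore.funcs.mmio_writel = hsw_write32;
dev_priv->uncore.funcs.mmio_writeq = hsw_write64;

which is exactly the repetition each case of the switch below used to spell out by hand.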
void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
......@@ -830,7 +853,7 @@ void intel_uncore_init(struct drm_device *dev)
setup_timer(&dev_priv->uncore.force_wake_timer,
gen6_force_wake_timer, (unsigned long)dev_priv);
intel_uncore_early_sanitize(dev, false);
__intel_uncore_early_sanitize(dev, false);
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
......@@ -879,76 +902,44 @@ void intel_uncore_init(struct drm_device *dev)
switch (INTEL_INFO(dev)->gen) {
default:
if (IS_CHERRYVIEW(dev)) {
dev_priv->uncore.funcs.mmio_writeb = chv_write8;
dev_priv->uncore.funcs.mmio_writew = chv_write16;
dev_priv->uncore.funcs.mmio_writel = chv_write32;
dev_priv->uncore.funcs.mmio_writeq = chv_write64;
dev_priv->uncore.funcs.mmio_readb = chv_read8;
dev_priv->uncore.funcs.mmio_readw = chv_read16;
dev_priv->uncore.funcs.mmio_readl = chv_read32;
dev_priv->uncore.funcs.mmio_readq = chv_read64;
ASSIGN_WRITE_MMIO_VFUNCS(chv);
ASSIGN_READ_MMIO_VFUNCS(chv);
} else {
dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
dev_priv->uncore.funcs.mmio_writew = gen8_write16;
dev_priv->uncore.funcs.mmio_writel = gen8_write32;
dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
dev_priv->uncore.funcs.mmio_readb = gen6_read8;
dev_priv->uncore.funcs.mmio_readw = gen6_read16;
dev_priv->uncore.funcs.mmio_readl = gen6_read32;
dev_priv->uncore.funcs.mmio_readq = gen6_read64;
ASSIGN_WRITE_MMIO_VFUNCS(gen8);
ASSIGN_READ_MMIO_VFUNCS(gen6);
}
break;
case 7:
case 6:
if (IS_HASWELL(dev)) {
dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
dev_priv->uncore.funcs.mmio_writew = hsw_write16;
dev_priv->uncore.funcs.mmio_writel = hsw_write32;
dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
ASSIGN_WRITE_MMIO_VFUNCS(hsw);
} else {
dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
dev_priv->uncore.funcs.mmio_writew = gen6_write16;
dev_priv->uncore.funcs.mmio_writel = gen6_write32;
dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
ASSIGN_WRITE_MMIO_VFUNCS(gen6);
}
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.mmio_readb = vlv_read8;
dev_priv->uncore.funcs.mmio_readw = vlv_read16;
dev_priv->uncore.funcs.mmio_readl = vlv_read32;
dev_priv->uncore.funcs.mmio_readq = vlv_read64;
ASSIGN_READ_MMIO_VFUNCS(vlv);
} else {
dev_priv->uncore.funcs.mmio_readb = gen6_read8;
dev_priv->uncore.funcs.mmio_readw = gen6_read16;
dev_priv->uncore.funcs.mmio_readl = gen6_read32;
dev_priv->uncore.funcs.mmio_readq = gen6_read64;
ASSIGN_READ_MMIO_VFUNCS(gen6);
}
break;
case 5:
dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
dev_priv->uncore.funcs.mmio_writew = gen5_write16;
dev_priv->uncore.funcs.mmio_writel = gen5_write32;
dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
dev_priv->uncore.funcs.mmio_readb = gen5_read8;
dev_priv->uncore.funcs.mmio_readw = gen5_read16;
dev_priv->uncore.funcs.mmio_readl = gen5_read32;
dev_priv->uncore.funcs.mmio_readq = gen5_read64;
ASSIGN_WRITE_MMIO_VFUNCS(gen5);
ASSIGN_READ_MMIO_VFUNCS(gen5);
break;
case 4:
case 3:
case 2:
dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
dev_priv->uncore.funcs.mmio_writew = gen4_write16;
dev_priv->uncore.funcs.mmio_writel = gen4_write32;
dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
dev_priv->uncore.funcs.mmio_readb = gen4_read8;
dev_priv->uncore.funcs.mmio_readw = gen4_read16;
dev_priv->uncore.funcs.mmio_readl = gen4_read32;
dev_priv->uncore.funcs.mmio_readq = gen4_read64;
ASSIGN_WRITE_MMIO_VFUNCS(gen4);
ASSIGN_READ_MMIO_VFUNCS(gen4);
break;
}
i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
void intel_uncore_fini(struct drm_device *dev)
{
......