Commit f0aa848f authored by Dave Airlie

Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
As promised, a stash of (mostly) fixes. Two pieces of non-fixes included:
- A notch more gtt refactoring from Ben, beaten to death with igt in our
  nightly testing.
- Support for display-less server chips (again from Ben). New hw
  support which is only likely to break itself ;-)

Otherwise just tons of fixes:
- hpd irq storm mitigation from Egbert Eich. Your -next tree already has
  the infrastructure, this here just supplies the logic.
- sdvo hw state check fix from Egbert Eich
- fb cb tune settings for the pch pll clocks on cpt/ppt
- "Bring a bigger gun" coherence workaround for multi-threade, mulit-core
  & thrashing tiled gtt cpu access from Chris.
- Update haswell mPHY code.
- l3$ caching for context objects on ivb/hsw (Chris).
- dp aux refclock fix for haswell (Jani)
- moar overclocking fixes for snb/ivb (Ben)
- ecobits ppgtt pte caching control fixes from Ville
- fence stride check fixes and limit improvements (Ville)
- fix up crtc force restoring, potentially resulting in tons of hw state
  check WARNs
- Oops fix for NULL dereferencing of fb pointers when force-restoring a crtc
  when other crtcs are disabled and the force-restored crtc is _not_ the
  first one.
- Fix pfit disabling on gen2/3.
- Haswell ring freq scaling fixes (Chris).
- backlight init/teardown fix (failed eDP init killed the lvds backlight)
  from Jani
- cpt/ppt fdi polarity fixes from Paulo (should help a lot of the FDI link
  train failures).
- And a bunch of smaller things all over.
* 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel: (56 commits)
  drm/i915: fix bpc vs. bpp confusion in intel_crtc_compute_config
  drm/i915: move cpu_transcoder to the pipe configuration
  drm/i915: preserve the PBC bits of TRANS_CHICKEN2
  drm/i915: set CPT FDI RX polarity bits based on VBT
  drm/i915: Add Reenable Timer to turn Hotplug Detection back on (v4)
  drm/i915: Disable HPD interrupt on pin when irq storm is detected (v3)
  drm/i915: Mask out the HPD irq bits before setting them individually.
  drm/i915: (re)init HPD interrupt storm statistics
  drm/i915: Add HPD IRQ storm detection (v5)
  drm/i915: WARN when LPT-LP is not paired with ULT CPU
  drm/i915: don't intel_crt_init on any ULT machines
  drm/i915: remove comment about IVB link training from intel_pm.c
  drm/i915: VLV doesn't have LLC
  drm/i915: Scale ring, rather than ia, frequency on Haswell
  drm/i915: shorten debugfs output simple attributes
  drm/i915: Fixup pfit disabling for gen2/3
  drm/i915: Fixup Oops in the pipe config computation
  drm/i915: ensure single initialization and cleanup of backlight device
  drm/i915: don't touch the PF regs if the power well is down
  drm/i915: add intel_using_power_well
  ...
parents e1adc78c bd080ee5
...@@ -901,7 +901,7 @@ i915_next_seqno_set(void *data, u64 val) ...@@ -901,7 +901,7 @@ i915_next_seqno_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
i915_next_seqno_get, i915_next_seqno_set, i915_next_seqno_get, i915_next_seqno_set,
"next_seqno : 0x%llx\n"); "0x%llx\n");
static int i915_rstdby_delays(struct seq_file *m, void *unused) static int i915_rstdby_delays(struct seq_file *m, void *unused)
{ {
...@@ -1006,6 +1006,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) ...@@ -1006,6 +1006,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
max_freq = rp_state_cap & 0xff; max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
max_freq * GT_FREQUENCY_MULTIPLIER); max_freq * GT_FREQUENCY_MULTIPLIER);
seq_printf(m, "Max overclocked frequency: %dMHz\n",
dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
} else { } else {
seq_printf(m, "no P-state info available\n"); seq_printf(m, "no P-state info available\n");
} }
...@@ -1354,7 +1357,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ...@@ -1354,7 +1357,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret) if (ret)
return ret; return ret;
seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
for (gpu_freq = dev_priv->rps.min_delay; for (gpu_freq = dev_priv->rps.min_delay;
gpu_freq <= dev_priv->rps.max_delay; gpu_freq <= dev_priv->rps.max_delay;
...@@ -1363,7 +1366,10 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ...@@ -1363,7 +1366,10 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
sandybridge_pcode_read(dev_priv, sandybridge_pcode_read(dev_priv,
GEN6_PCODE_READ_MIN_FREQ_TABLE, GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq); &ia_freq);
seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
gpu_freq * GT_FREQUENCY_MULTIPLIER,
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
} }
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
...@@ -1687,7 +1693,7 @@ i915_wedged_set(void *data, u64 val) ...@@ -1687,7 +1693,7 @@ i915_wedged_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
i915_wedged_get, i915_wedged_set, i915_wedged_get, i915_wedged_set,
"wedged : %llu\n"); "%llu\n");
static int static int
i915_ring_stop_get(void *data, u64 *val) i915_ring_stop_get(void *data, u64 *val)
...@@ -1841,7 +1847,7 @@ i915_max_freq_set(void *data, u64 val) ...@@ -1841,7 +1847,7 @@ i915_max_freq_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
i915_max_freq_get, i915_max_freq_set, i915_max_freq_get, i915_max_freq_set,
"max freq: %llu\n"); "%llu\n");
static int static int
i915_min_freq_get(void *data, u64 *val) i915_min_freq_get(void *data, u64 *val)
...@@ -1892,7 +1898,7 @@ i915_min_freq_set(void *data, u64 val) ...@@ -1892,7 +1898,7 @@ i915_min_freq_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
i915_min_freq_get, i915_min_freq_set, i915_min_freq_get, i915_min_freq_set,
"min freq: %llu\n"); "%llu\n");
static int static int
i915_cache_sharing_get(void *data, u64 *val) i915_cache_sharing_get(void *data, u64 *val)
......
...@@ -1322,6 +1322,10 @@ static int i915_load_modeset_init(struct drm_device *dev) ...@@ -1322,6 +1322,10 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Always safe in the mode setting case. */ /* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */ /* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1; dev->vblank_disable_allowed = 1;
if (INTEL_INFO(dev)->num_pipes == 0) {
dev_priv->mm.suspended = 0;
return 0;
}
ret = intel_fbdev_init(dev); ret = intel_fbdev_init(dev);
if (ret) if (ret)
...@@ -1514,6 +1518,28 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -1514,6 +1518,28 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv; goto free_priv;
} }
mmio_bar = IS_GEN2(dev) ? 1 : 0;
/* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
* in the same BAR, so we want to restrict this ioremap from
* clobbering the GTT which we want ioremap_wc instead. Fortunately,
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
*/
if (info->gen < 5)
mmio_size = 512*1024;
else
mmio_size = 2*1024*1024;
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
goto put_bridge;
}
intel_early_sanitize_regs(dev);
ret = i915_gem_gtt_init(dev); ret = i915_gem_gtt_init(dev);
if (ret) if (ret)
goto put_bridge; goto put_bridge;
...@@ -1538,28 +1564,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -1538,28 +1564,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
mmio_bar = IS_GEN2(dev) ? 1 : 0;
/* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
* in the same BAR, so we want to restrict this ioremap from
* clobbering the GTT which we want ioremap_wc instead. Fortunately,
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
*/
if (info->gen < 5)
mmio_size = 512*1024;
else
mmio_size = 2*1024*1024;
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
goto put_gmch;
}
intel_early_sanitize_regs(dev);
aperture_size = dev_priv->gtt.mappable_end; aperture_size = dev_priv->gtt.mappable_end;
dev_priv->gtt.mappable = dev_priv->gtt.mappable =
...@@ -1634,9 +1638,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -1634,9 +1638,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
dev_priv->num_plane = 2; dev_priv->num_plane = 2;
ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); if (INTEL_INFO(dev)->num_pipes) {
if (ret) ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
goto out_gem_unload; if (ret)
goto out_gem_unload;
}
/* Start out suspended */ /* Start out suspended */
dev_priv->mm.suspended = 1; dev_priv->mm.suspended = 1;
...@@ -1651,9 +1657,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -1651,9 +1657,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
i915_setup_sysfs(dev); i915_setup_sysfs(dev);
/* Must be done after probing outputs */ if (INTEL_INFO(dev)->num_pipes) {
intel_opregion_init(dev); /* Must be done after probing outputs */
acpi_video_register(); intel_opregion_init(dev);
acpi_video_register();
}
if (IS_GEN5(dev)) if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv); intel_gpu_ips_init(dev_priv);
...@@ -1678,10 +1686,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -1678,10 +1686,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->mm.gtt_mtrr = -1; dev_priv->mm.gtt_mtrr = -1;
} }
io_mapping_free(dev_priv->gtt.mappable); io_mapping_free(dev_priv->gtt.mappable);
dev_priv->gtt.gtt_remove(dev);
out_rmmap: out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs); pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
dev_priv->gtt.gtt_remove(dev);
put_bridge: put_bridge:
pci_dev_put(dev_priv->bridge_dev); pci_dev_put(dev_priv->bridge_dev);
free_priv: free_priv:
......
...@@ -140,6 +140,16 @@ extern int intel_agp_enabled; ...@@ -140,6 +140,16 @@ extern int intel_agp_enabled;
.subdevice = PCI_ANY_ID, \ .subdevice = PCI_ANY_ID, \
.driver_data = (unsigned long) info } .driver_data = (unsigned long) info }
#define INTEL_QUANTA_VGA_DEVICE(info) { \
.class = PCI_BASE_CLASS_DISPLAY << 16, \
.class_mask = 0xff0000, \
.vendor = 0x8086, \
.device = 0x16a, \
.subvendor = 0x152d, \
.subdevice = 0x8990, \
.driver_data = (unsigned long) info }
static const struct intel_device_info intel_i830_info = { static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1,
...@@ -272,12 +282,19 @@ static const struct intel_device_info intel_ivybridge_m_info = { ...@@ -272,12 +282,19 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.is_mobile = 1, .is_mobile = 1,
}; };
static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.num_pipes = 0, /* legal, last one wins */
};
static const struct intel_device_info intel_valleyview_m_info = { static const struct intel_device_info intel_valleyview_m_info = {
GEN7_FEATURES, GEN7_FEATURES,
.is_mobile = 1, .is_mobile = 1,
.num_pipes = 2, .num_pipes = 2,
.is_valleyview = 1, .is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE, .display_mmio_offset = VLV_DISPLAY_BASE,
.has_llc = 0, /* legal, last one wins */
}; };
static const struct intel_device_info intel_valleyview_d_info = { static const struct intel_device_info intel_valleyview_d_info = {
...@@ -285,6 +302,7 @@ static const struct intel_device_info intel_valleyview_d_info = { ...@@ -285,6 +302,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
.num_pipes = 2, .num_pipes = 2,
.is_valleyview = 1, .is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE, .display_mmio_offset = VLV_DISPLAY_BASE,
.has_llc = 0, /* legal, last one wins */
}; };
static const struct intel_device_info intel_haswell_d_info = { static const struct intel_device_info intel_haswell_d_info = {
...@@ -342,6 +360,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */ ...@@ -342,6 +360,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
...@@ -397,6 +416,15 @@ void intel_detect_pch(struct drm_device *dev) ...@@ -397,6 +416,15 @@ void intel_detect_pch(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pch; struct pci_dev *pch;
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
* (which really amounts to a PCH but no South Display).
*/
if (INTEL_INFO(dev)->num_pipes == 0) {
dev_priv->pch_type = PCH_NOP;
dev_priv->num_pch_pll = 0;
return;
}
/* /*
* The reason to probe ISA bridge instead of Dev31:Fun0 is to * The reason to probe ISA bridge instead of Dev31:Fun0 is to
* make graphics device passthrough work easy for VMM, that only * make graphics device passthrough work easy for VMM, that only
...@@ -431,11 +459,13 @@ void intel_detect_pch(struct drm_device *dev) ...@@ -431,11 +459,13 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->num_pch_pll = 0; dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n"); DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev)); WARN_ON(!IS_HASWELL(dev));
WARN_ON(IS_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT; dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0; dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev)); WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
} }
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
} }
...@@ -901,7 +931,11 @@ int i915_reset(struct drm_device *dev) ...@@ -901,7 +931,11 @@ int i915_reset(struct drm_device *dev)
ring->init(ring); ring->init(ring);
i915_gem_context_init(dev); i915_gem_context_init(dev);
i915_gem_init_ppgtt(dev); if (dev_priv->mm.aliasing_ppgtt) {
ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
if (ret)
i915_gem_cleanup_aliasing_ppgtt(dev);
}
/* /*
* It would make sense to re-init all the other hw state, at * It would make sense to re-init all the other hw state, at
......
...@@ -195,9 +195,9 @@ struct drm_i915_master_private { ...@@ -195,9 +195,9 @@ struct drm_i915_master_private {
struct _drm_i915_sarea *sarea_priv; struct _drm_i915_sarea *sarea_priv;
}; };
#define I915_FENCE_REG_NONE -1 #define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 16 #define I915_MAX_NUM_FENCES 32
/* 16 fences + sign bit for FENCE_REG_NONE */ /* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 5 #define I915_MAX_NUM_FENCE_BITS 6
struct drm_i915_fence_reg { struct drm_i915_fence_reg {
struct list_head lru_list; struct list_head lru_list;
...@@ -449,6 +449,7 @@ struct i915_hw_ppgtt { ...@@ -449,6 +449,7 @@ struct i915_hw_ppgtt {
struct sg_table *st, struct sg_table *st,
unsigned int pg_start, unsigned int pg_start,
enum i915_cache_level cache_level); enum i915_cache_level cache_level);
int (*enable)(struct drm_device *dev);
void (*cleanup)(struct i915_hw_ppgtt *ppgtt); void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
}; };
...@@ -479,6 +480,7 @@ enum intel_pch { ...@@ -479,6 +480,7 @@ enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */ PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */ PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */ PCH_LPT, /* Lynxpoint PCH */
PCH_NOP,
}; };
enum intel_sbi_destination { enum intel_sbi_destination {
...@@ -666,6 +668,7 @@ struct intel_gen6_power_mgmt { ...@@ -666,6 +668,7 @@ struct intel_gen6_power_mgmt {
u8 cur_delay; u8 cur_delay;
u8 min_delay; u8 min_delay;
u8 max_delay; u8 max_delay;
u8 hw_max;
struct delayed_work delayed_resume_work; struct delayed_work delayed_resume_work;
...@@ -929,6 +932,16 @@ typedef struct drm_i915_private { ...@@ -929,6 +932,16 @@ typedef struct drm_i915_private {
struct work_struct hotplug_work; struct work_struct hotplug_work;
bool enable_hotplug_processing; bool enable_hotplug_processing;
struct {
unsigned long hpd_last_jiffies;
int hpd_cnt;
enum {
HPD_ENABLED = 0,
HPD_DISABLED = 1,
HPD_MARK_DISABLED = 2
} hpd_mark;
} hpd_stats[HPD_NUM_PINS];
struct timer_list hotplug_reenable_timer;
int num_pch_pll; int num_pch_pll;
int num_plane; int num_plane;
...@@ -963,6 +976,7 @@ typedef struct drm_i915_private { ...@@ -963,6 +976,7 @@ typedef struct drm_i915_private {
unsigned int int_crt_support:1; unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1; unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1; unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
int lvds_ssc_freq; int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
struct { struct {
...@@ -1373,6 +1387,7 @@ struct drm_i915_file_private { ...@@ -1373,6 +1387,7 @@ struct drm_i915_file_private {
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
...@@ -1640,7 +1655,6 @@ int __must_check i915_gem_init(struct drm_device *dev); ...@@ -1640,7 +1655,6 @@ int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev); void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev); int __must_check i915_gem_idle(struct drm_device *dev);
......
...@@ -2683,17 +2683,35 @@ static inline int fence_number(struct drm_i915_private *dev_priv, ...@@ -2683,17 +2683,35 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
return fence - dev_priv->fence_regs; return fence - dev_priv->fence_regs;
} }
static void i915_gem_write_fence__ipi(void *data)
{
wbinvd();
}
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence, struct drm_i915_fence_reg *fence,
bool enable) bool enable)
{ {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; struct drm_device *dev = obj->base.dev;
int reg = fence_number(dev_priv, fence); struct drm_i915_private *dev_priv = dev->dev_private;
int fence_reg = fence_number(dev_priv, fence);
i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
/* In order to fully serialize access to the fenced region and
* the update to the fence register we need to take extreme
* measures on SNB+. In theory, the write to the fence register
* flushes all memory transactions before, and coupled with the
* mb() placed around the register write we serialise all memory
* operations with respect to the changes in the tiler. Yet, on
* SNB+ we need to take a step further and emit an explicit wbinvd()
* on each processor in order to manually flush all memory
* transactions before updating the fence register.
*/
if (HAS_LLC(obj->base.dev))
on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
if (enable) { if (enable) {
obj->fence_reg = reg; obj->fence_reg = fence_reg;
fence->obj = obj; fence->obj = obj;
list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
} else { } else {
...@@ -3992,6 +4010,12 @@ i915_gem_init_hw(struct drm_device *dev) ...@@ -3992,6 +4010,12 @@ i915_gem_init_hw(struct drm_device *dev)
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
}
i915_gem_l3_remap(dev); i915_gem_l3_remap(dev);
i915_gem_init_swizzling(dev); i915_gem_init_swizzling(dev);
...@@ -4005,7 +4029,13 @@ i915_gem_init_hw(struct drm_device *dev) ...@@ -4005,7 +4029,13 @@ i915_gem_init_hw(struct drm_device *dev)
* contexts before PPGTT. * contexts before PPGTT.
*/ */
i915_gem_context_init(dev); i915_gem_context_init(dev);
i915_gem_init_ppgtt(dev); if (dev_priv->mm.aliasing_ppgtt) {
ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
if (ret) {
i915_gem_cleanup_aliasing_ppgtt(dev);
DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
}
}
return 0; return 0;
} }
...@@ -4160,7 +4190,9 @@ i915_gem_load(struct drm_device *dev) ...@@ -4160,7 +4190,9 @@ i915_gem_load(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET)) if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->fence_reg_start = 3; dev_priv->fence_reg_start = 3;
if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
dev_priv->num_fence_regs = 32;
else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16; dev_priv->num_fence_regs = 16;
else else
dev_priv->num_fence_regs = 8; dev_priv->num_fence_regs = 8;
......
...@@ -152,6 +152,13 @@ create_hw_context(struct drm_device *dev, ...@@ -152,6 +152,13 @@ create_hw_context(struct drm_device *dev,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
I915_CACHE_LLC_MLC);
if (ret)
goto err_out;
}
/* The ring associated with the context object is handled by the normal /* The ring associated with the context object is handled by the normal
* object tracking code. We give an initial ring value simple to pass an * object tracking code. We give an initial ring value simple to pass an
* assertion in the context switch code. * assertion in the context switch code.
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#include "i915_trace.h" #include "i915_trace.h"
#include "intel_drv.h" #include "intel_drv.h"
typedef uint32_t gtt_pte_t; typedef uint32_t gen6_gtt_pte_t;
/* PPGTT stuff */ /* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
...@@ -44,11 +44,11 @@ typedef uint32_t gtt_pte_t; ...@@ -44,11 +44,11 @@ typedef uint32_t gtt_pte_t;
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) #define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev, static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
dma_addr_t addr, dma_addr_t addr,
enum i915_cache_level level) enum i915_cache_level level)
{ {
gtt_pte_t pte = GEN6_PTE_VALID; gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr); pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) { switch (level) {
...@@ -72,17 +72,84 @@ static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev, ...@@ -72,17 +72,84 @@ static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
BUG(); BUG();
} }
return pte; return pte;
} }
static int gen6_ppgtt_enable(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
gen6_gtt_pte_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
pt_addr = ppgtt->pt_dma_addr[i];
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
ECOBITS_PPGTT_CACHE64B);
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
uint32_t ecochk, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
ecochk = I915_READ(GAM_ECOCHK);
if (IS_HASWELL(dev)) {
ecochk |= ECOCHK_PPGTT_WB_HSW;
} else {
ecochk |= ECOCHK_PPGTT_LLC_IVB;
ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
}
I915_WRITE(GAM_ECOCHK, ecochk);
/* GFX_MODE is per-ring on gen7+ */
}
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
return 0;
}
/* PPGTT support for Sandybdrige/Gen6 and later */ /* PPGTT support for Sandybdrige/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry, unsigned first_entry,
unsigned num_entries) unsigned num_entries)
{ {
gtt_pte_t *pt_vaddr; gen6_gtt_pte_t *pt_vaddr, scratch_pte;
gtt_pte_t scratch_pte;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i; unsigned last_pte, i;
...@@ -114,7 +181,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, ...@@ -114,7 +181,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry, unsigned first_entry,
enum i915_cache_level cache_level) enum i915_cache_level cache_level)
{ {
gtt_pte_t *pt_vaddr; gen6_gtt_pte_t *pt_vaddr;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
struct sg_page_iter sg_iter; struct sg_page_iter sg_iter;
...@@ -170,6 +237,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ...@@ -170,6 +237,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES; gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->enable = gen6_ppgtt_enable;
ppgtt->clear_range = gen6_ppgtt_clear_range; ppgtt->clear_range = gen6_ppgtt_clear_range;
ppgtt->insert_entries = gen6_ppgtt_insert_entries; ppgtt->insert_entries = gen6_ppgtt_insert_entries;
ppgtt->cleanup = gen6_ppgtt_cleanup; ppgtt->cleanup = gen6_ppgtt_cleanup;
...@@ -203,12 +271,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ...@@ -203,12 +271,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->pt_dma_addr[i] = pt_addr; ppgtt->pt_dma_addr[i] = pt_addr;
} }
ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
ppgtt->clear_range(ppgtt, 0, ppgtt->clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
return 0; return 0;
...@@ -240,8 +306,13 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) ...@@ -240,8 +306,13 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
return -ENOMEM; return -ENOMEM;
ppgtt->dev = dev; ppgtt->dev = dev;
ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt);
else
BUG();
ret = gen6_ppgtt_init(ppgtt);
if (ret) if (ret)
kfree(ppgtt); kfree(ppgtt);
else else
...@@ -259,6 +330,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) ...@@ -259,6 +330,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
return; return;
ppgtt->cleanup(ppgtt); ppgtt->cleanup(ppgtt);
dev_priv->mm.aliasing_ppgtt = NULL;
} }
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
...@@ -278,64 +350,6 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, ...@@ -278,64 +350,6 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
obj->base.size >> PAGE_SHIFT); obj->base.size >> PAGE_SHIFT);
} }
void i915_gem_init_ppgtt(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
gtt_pte_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
if (!dev_priv->mm.aliasing_ppgtt)
return;
pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
pt_addr = ppgtt->pt_dma_addr[i];
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
}
extern int intel_iommu_gfx_mapped; extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require require idling the GPU before /* Certain Gen5 chipsets require require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled. * unmapping anything from the GTT when VT-d is enabled.
...@@ -416,8 +430,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, ...@@ -416,8 +430,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
enum i915_cache_level level) enum i915_cache_level level)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
gtt_pte_t __iomem *gtt_entries = gen6_gtt_pte_t __iomem *gtt_entries =
(gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0; int i = 0;
struct sg_page_iter sg_iter; struct sg_page_iter sg_iter;
dma_addr_t addr; dma_addr_t addr;
...@@ -451,8 +465,8 @@ static void gen6_ggtt_clear_range(struct drm_device *dev, ...@@ -451,8 +465,8 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
unsigned int num_entries) unsigned int num_entries)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
gtt_pte_t scratch_pte; gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i; int i;
...@@ -626,9 +640,12 @@ void i915_gem_init_global_gtt(struct drm_device *dev) ...@@ -626,9 +640,12 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
int ret; int ret;
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */ if (INTEL_INFO(dev)->gen <= 7) {
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; /* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
...@@ -736,10 +753,12 @@ static int gen6_gmch_probe(struct drm_device *dev, ...@@ -736,10 +753,12 @@ static int gen6_gmch_probe(struct drm_device *dev,
else else
*stolen = gen6_get_stolen_size(snb_gmch_ctl); *stolen = gen6_get_stolen_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT; *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
/* For Modern GENs the PTEs and register space are split in the BAR */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
(pci_resource_len(dev->pdev, 0) / 2);
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
if (!dev_priv->gtt.gsm) { if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n"); DRM_ERROR("Failed to map the gtt page table\n");
...@@ -796,7 +815,6 @@ int i915_gem_gtt_init(struct drm_device *dev) ...@@ -796,7 +815,6 @@ int i915_gem_gtt_init(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_gtt *gtt = &dev_priv->gtt; struct i915_gtt *gtt = &dev_priv->gtt;
unsigned long gtt_size;
int ret; int ret;
if (INTEL_INFO(dev)->gen <= 5) { if (INTEL_INFO(dev)->gen <= 5) {
...@@ -814,8 +832,6 @@ int i915_gem_gtt_init(struct drm_device *dev) ...@@ -814,8 +832,6 @@ int i915_gem_gtt_init(struct drm_device *dev)
if (ret) if (ret)
return ret; return ret;
gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t);
/* GMADR is the PCI mmio aperture into the global GTT. */ /* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %zdM\n", DRM_INFO("Memory usable by graphics device = %zdM\n",
dev_priv->gtt.total >> 20); dev_priv->gtt.total >> 20);
......
...@@ -217,9 +217,12 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) ...@@ -217,9 +217,12 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
tile_width = 512; tile_width = 512;
/* check maximum stride & object size */ /* check maximum stride & object size */
if (INTEL_INFO(dev)->gen >= 4) { /* i965+ stores the end address of the gtt mapping in the fence
/* i965 stores the end address of the gtt mapping in the fence * reg, so dont bother to check the size */
* reg, so dont bother to check the size */ if (INTEL_INFO(dev)->gen >= 7) {
if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
} else if (INTEL_INFO(dev)->gen >= 4) {
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false; return false;
} else { } else {
...@@ -235,6 +238,9 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) ...@@ -235,6 +238,9 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
} }
} }
if (stride < tile_width)
return false;
/* 965+ just needs multiples of tile width */ /* 965+ just needs multiples of tile width */
if (INTEL_INFO(dev)->gen >= 4) { if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1)) if (stride & (tile_width - 1))
...@@ -243,9 +249,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) ...@@ -243,9 +249,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
} }
/* Pre-965 needs power of two tile widths */ /* Pre-965 needs power of two tile widths */
if (stride < tile_width)
return false;
if (stride & (stride - 1)) if (stride & (stride - 1))
return false; return false;
......
...@@ -125,8 +125,14 @@ ...@@ -125,8 +125,14 @@
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) #define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
#define ECOCHK_PPGTT_CACHE64B (0x3<<3) #define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3) #define ECOCHK_PPGTT_CACHE4B (0x0<<3)
#define ECOCHK_PPGTT_GFDT_IVB (0x1<<4)
#define ECOCHK_PPGTT_LLC_IVB (0x1<<3)
#define ECOCHK_PPGTT_UC_HSW (0x1<<3)
#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
#define GAC_ECO_BITS 0x14090 #define GAC_ECO_BITS 0x14090
#define ECOBITS_SNB_BIT (1<<13)
#define ECOBITS_PPGTT_CACHE64B (3<<8) #define ECOBITS_PPGTT_CACHE64B (3<<8)
#define ECOBITS_PPGTT_CACHE4B (0<<8) #define ECOBITS_PPGTT_CACHE4B (0<<8)
...@@ -424,6 +430,7 @@ ...@@ -424,6 +430,7 @@
#define FENCE_REG_SANDYBRIDGE_0 0x100000 #define FENCE_REG_SANDYBRIDGE_0 0x100000
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
/* control register for cpu gtt access */ /* control register for cpu gtt access */
#define TILECTL 0x101000 #define TILECTL 0x101000
...@@ -1203,6 +1210,9 @@ ...@@ -1203,6 +1210,9 @@
#define MCHBAR_MIRROR_BASE_SNB 0x140000 #define MCHBAR_MIRROR_BASE_SNB 0x140000
/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
#define DCLK 0x5e04
/** 915-945 and GM965 MCH register controlling DRAM channel access */ /** 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200 #define DCC 0x10200
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
...@@ -3568,6 +3578,9 @@ ...@@ -3568,6 +3578,9 @@
#define DISP_ARB_CTL 0x45000 #define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13) #define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15) #define DISP_FBC_WM_DIS (1<<15)
#define GEN7_MSG_CTL 0x45010
#define WAIT_FOR_PCH_RESET_ACK (1<<1)
#define WAIT_FOR_PCH_FLR_ACK (1<<0)
/* GEN7 chicken */ /* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010 #define GEN7_COMMON_SLICE_CHICKEN1 0x7010
...@@ -3946,8 +3959,11 @@ ...@@ -3946,8 +3959,11 @@
#define _TRANSA_CHICKEN2 0xf0064 #define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064 #define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) #define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27)
#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26)
#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25)
#define SOUTH_CHICKEN1 0xc2000 #define SOUTH_CHICKEN1 0xc2000
#define FDIA_PHASE_SYNC_SHIFT_OVR 19 #define FDIA_PHASE_SYNC_SHIFT_OVR 19
...@@ -4380,6 +4396,7 @@ ...@@ -4380,6 +4396,7 @@
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define GEN6_PCODE_DATA 0x138128 #define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define VLV_IOSF_DOORBELL_REQ 0x182100 #define VLV_IOSF_DOORBELL_REQ 0x182100
#define IOSF_DEVFN_SHIFT 24 #define IOSF_DEVFN_SHIFT 24
......
...@@ -239,7 +239,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -239,7 +239,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
struct drm_device *dev = minor->dev; struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap, hw_max, hw_min; u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
ssize_t ret; ssize_t ret;
ret = kstrtou32(buf, 0, &val); ret = kstrtou32(buf, 0, &val);
...@@ -251,7 +251,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -251,7 +251,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = (rp_state_cap & 0xff); hw_max = dev_priv->rps.hw_max;
non_oc_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16); hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
...@@ -259,6 +260,10 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -259,6 +260,10 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
return -EINVAL; return -EINVAL;
} }
if (val > non_oc_max)
DRM_DEBUG("User requested overclocking to %d\n",
val * GT_FREQUENCY_MULTIPLIER);
if (dev_priv->rps.cur_delay > val) if (dev_priv->rps.cur_delay > val)
gen6_set_rps(dev_priv->dev, val); gen6_set_rps(dev_priv->dev, val);
...@@ -302,7 +307,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, ...@@ -302,7 +307,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = (rp_state_cap & 0xff); hw_max = dev_priv->rps.hw_max;
hw_min = ((rp_state_cap & 0xff0000) >> 16); hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
......
...@@ -351,12 +351,14 @@ parse_general_features(struct drm_i915_private *dev_priv, ...@@ -351,12 +351,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->lvds_ssc_freq = dev_priv->lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq); intel_bios_ssc_frequency(dev, general->ssc_freq);
dev_priv->display_clock_mode = general->display_clock_mode; dev_priv->display_clock_mode = general->display_clock_mode;
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n", dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
dev_priv->int_tv_support, dev_priv->int_tv_support,
dev_priv->int_crt_support, dev_priv->int_crt_support,
dev_priv->lvds_use_ssc, dev_priv->lvds_use_ssc,
dev_priv->lvds_ssc_freq, dev_priv->lvds_ssc_freq,
dev_priv->display_clock_mode); dev_priv->display_clock_mode,
dev_priv->fdi_rx_polarity_inverted);
} }
} }
...@@ -692,6 +694,9 @@ intel_parse_bios(struct drm_device *dev) ...@@ -692,6 +694,9 @@ intel_parse_bios(struct drm_device *dev)
struct bdb_header *bdb = NULL; struct bdb_header *bdb = NULL;
u8 __iomem *bios = NULL; u8 __iomem *bios = NULL;
if (HAS_PCH_NOP(dev))
return -ENODEV;
init_vbt_defaults(dev_priv); init_vbt_defaults(dev_priv);
/* XXX Should this validation be moved to intel_opregion.c? */ /* XXX Should this validation be moved to intel_opregion.c? */
......
...@@ -127,7 +127,9 @@ struct bdb_general_features { ...@@ -127,7 +127,9 @@ struct bdb_general_features {
/* bits 3 */ /* bits 3 */
u8 disable_smooth_vision:1; u8 disable_smooth_vision:1;
u8 single_dvi:1; u8 single_dvi:1;
u8 rsvd9:6; /* finish byte */ u8 rsvd9:1;
u8 fdi_rx_polarity_inverted:1;
u8 rsvd10:4; /* finish byte */
/* bits 4 */ /* bits 4 */
u8 legacy_monitor_detect; u8 legacy_monitor_detect;
......
...@@ -787,10 +787,8 @@ void intel_crt_init(struct drm_device *dev) ...@@ -787,10 +787,8 @@ void intel_crt_init(struct drm_device *dev)
drm_sysfs_connector_add(connector); drm_sysfs_connector_add(connector);
if (I915_HAS_HOTPLUG(dev)) if (!I915_HAS_HOTPLUG(dev))
connector->polled = DRM_CONNECTOR_POLL_HPD; intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/* /*
* Configure the automatic hotplug detection stuff * Configure the automatic hotplug detection stuff
......
...@@ -924,7 +924,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) ...@@ -924,7 +924,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = crtc->dev->dev_private; struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
int type = intel_encoder->type; int type = intel_encoder->type;
uint32_t temp; uint32_t temp;
...@@ -958,7 +958,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) ...@@ -958,7 +958,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
struct drm_encoder *encoder = &intel_encoder->base; struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private; struct drm_i915_private *dev_priv = crtc->dev->dev_private;
enum pipe pipe = intel_crtc->pipe; enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder); enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type; int type = intel_encoder->type;
uint32_t temp; uint32_t temp;
...@@ -1223,7 +1223,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) ...@@ -1223,7 +1223,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
struct drm_i915_private *dev_priv = crtc->dev->dev_private; struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder); enum port port = intel_ddi_get_encoder_port(intel_encoder);
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP) if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
...@@ -1233,7 +1233,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) ...@@ -1233,7 +1233,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
{ {
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP) if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
......
...@@ -353,10 +353,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, ...@@ -353,10 +353,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */ aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev)) } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
aux_clock_divider = 74;
} else if (HAS_PCH_SPLIT(dev)) {
aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
else } else {
aux_clock_divider = intel_hrawclk(dev) / 2; aux_clock_divider = intel_hrawclk(dev) / 2;
}
if (IS_GEN6(dev)) if (IS_GEN6(dev))
precharge = 3; precharge = 3;
...@@ -2470,17 +2474,14 @@ intel_dp_set_property(struct drm_connector *connector, ...@@ -2470,17 +2474,14 @@ intel_dp_set_property(struct drm_connector *connector,
static void static void
intel_dp_destroy(struct drm_connector *connector) intel_dp_destroy(struct drm_connector *connector)
{ {
struct drm_device *dev = connector->dev;
struct intel_dp *intel_dp = intel_attached_dp(connector); struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_connector *intel_connector = to_intel_connector(connector);
if (!IS_ERR_OR_NULL(intel_connector->edid)) if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid); kfree(intel_connector->edid);
if (is_edp(intel_dp)) { if (is_edp(intel_dp))
intel_panel_destroy_backlight(dev);
intel_panel_fini(&intel_connector->panel); intel_panel_fini(&intel_connector->panel);
}
drm_sysfs_connector_remove(connector); drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector); drm_connector_cleanup(connector);
...@@ -2789,7 +2790,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, ...@@ -2789,7 +2790,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = true; connector->interlace_allowed = true;
connector->doublescan_allowed = 0; connector->doublescan_allowed = 0;
......
...@@ -171,6 +171,10 @@ struct intel_connector { ...@@ -171,6 +171,10 @@ struct intel_connector {
/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
struct edid *edid; struct edid *edid;
/* since POLL and HPD connectors may use the same HPD line keep the native
state of connector->polled in case hotplug storm detection changes it */
u8 polled;
}; };
struct intel_crtc_config { struct intel_crtc_config {
...@@ -184,6 +188,10 @@ struct intel_crtc_config { ...@@ -184,6 +188,10 @@ struct intel_crtc_config {
* between pch encoders and cpu encoders. */ * between pch encoders and cpu encoders. */
bool has_pch_encoder; bool has_pch_encoder;
/* CPU Transcoder for the pipe. Currently this can only differ from the
* pipe on Haswell (where we have a special eDP transcoder). */
enum transcoder cpu_transcoder;
/* /*
* Use reduced/limited/broadcast rbg range, compressing from the full * Use reduced/limited/broadcast rbg range, compressing from the full
* range fed into the crtcs. * range fed into the crtcs.
...@@ -222,7 +230,6 @@ struct intel_crtc { ...@@ -222,7 +230,6 @@ struct intel_crtc {
struct drm_crtc base; struct drm_crtc base;
enum pipe pipe; enum pipe pipe;
enum plane plane; enum plane plane;
enum transcoder cpu_transcoder;
u8 lut_r[256], lut_g[256], lut_b[256]; u8 lut_r[256], lut_g[256], lut_b[256];
/* /*
* Whether the crtc and the connected output pipeline is active. Implies * Whether the crtc and the connected output pipeline is active. Implies
...@@ -693,6 +700,7 @@ extern void intel_update_fbc(struct drm_device *dev); ...@@ -693,6 +700,7 @@ extern void intel_update_fbc(struct drm_device *dev);
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void); extern void intel_gpu_ips_teardown(void);
extern bool intel_using_power_well(struct drm_device *dev);
extern void intel_init_power_well(struct drm_device *dev); extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable); extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev); extern void intel_enable_gt_powersave(struct drm_device *dev);
......
...@@ -283,6 +283,9 @@ void intel_fb_restore_mode(struct drm_device *dev) ...@@ -283,6 +283,9 @@ void intel_fb_restore_mode(struct drm_device *dev)
struct drm_mode_config *config = &dev->mode_config; struct drm_mode_config *config = &dev->mode_config;
struct drm_plane *plane; struct drm_plane *plane;
if (INTEL_INFO(dev)->num_pipes == 0)
return;
drm_modeset_lock_all(dev); drm_modeset_lock_all(dev);
ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
......
...@@ -294,8 +294,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, ...@@ -294,8 +294,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->cpu_transcoder); u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->cpu_transcoder); u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder);
unsigned int i, len = DIP_HEADER_SIZE + frame->len; unsigned int i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(ctl_reg); u32 val = I915_READ(ctl_reg);
...@@ -570,7 +570,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, ...@@ -570,7 +570,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private; struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->cpu_transcoder); u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
assert_hdmi_port_disabled(intel_hdmi); assert_hdmi_port_disabled(intel_hdmi);
...@@ -998,7 +998,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, ...@@ -998,7 +998,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
DRM_MODE_CONNECTOR_HDMIA); DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 1; connector->interlace_allowed = 1;
connector->doublescan_allowed = 0; connector->doublescan_allowed = 0;
......
...@@ -522,7 +522,9 @@ int intel_setup_gmbus(struct drm_device *dev) ...@@ -522,7 +522,9 @@ int intel_setup_gmbus(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i; int ret, i;
if (HAS_PCH_SPLIT(dev)) if (HAS_PCH_NOP(dev))
return 0;
else if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
else if (IS_VALLEYVIEW(dev)) else if (IS_VALLEYVIEW(dev))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
......
...@@ -631,7 +631,6 @@ static void intel_lvds_destroy(struct drm_connector *connector) ...@@ -631,7 +631,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
kfree(lvds_connector->base.edid); kfree(lvds_connector->base.edid);
intel_panel_destroy_backlight(connector->dev);
intel_panel_fini(&lvds_connector->base.panel); intel_panel_fini(&lvds_connector->base.panel);
drm_sysfs_connector_remove(connector); drm_sysfs_connector_remove(connector);
......
...@@ -428,6 +428,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector) ...@@ -428,6 +428,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
intel_panel_init_backlight(dev); intel_panel_init_backlight(dev);
if (WARN_ON(dev_priv->backlight.device))
return -ENODEV;
memset(&props, 0, sizeof(props)); memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW; props.type = BACKLIGHT_RAW;
props.brightness = dev_priv->backlight.level; props.brightness = dev_priv->backlight.level;
...@@ -453,8 +456,10 @@ int intel_panel_setup_backlight(struct drm_connector *connector) ...@@ -453,8 +456,10 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
void intel_panel_destroy_backlight(struct drm_device *dev) void intel_panel_destroy_backlight(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->backlight.device) if (dev_priv->backlight.device) {
backlight_device_unregister(dev_priv->backlight.device); backlight_device_unregister(dev_priv->backlight.device);
dev_priv->backlight.device = NULL;
}
} }
#else #else
int intel_panel_setup_backlight(struct drm_connector *connector) int intel_panel_setup_backlight(struct drm_connector *connector)
......
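The two intel_panel.c hunks above work as a pair: setup now refuses to register a second backlight device, and destroy clears the pointer after unregistering so a repeated teardown is harmless. A compact userspace model of that idempotent register/unregister pattern (all names here are invented for illustration):

```c
#include <stdio.h>
#include <stdlib.h>

struct backlight_device { int level; };

static struct backlight_device *registered; /* plays the role of dev_priv->backlight.device */

static int setup_backlight(void)
{
	if (registered) {            /* mirrors the WARN_ON(dev_priv->backlight.device) guard */
		fprintf(stderr, "backlight already registered\n");
		return -1;
	}
	registered = calloc(1, sizeof(*registered));
	return registered ? 0 : -1;
}

static void destroy_backlight(void)
{
	if (registered) {
		free(registered);
		registered = NULL;   /* clearing the pointer makes a second destroy a no-op */
	}
}

int main(void)
{
	setup_backlight();
	setup_backlight();           /* rejected: only one device may exist */
	destroy_backlight();
	destroy_backlight();         /* safe thanks to the NULL assignment */
	return 0;
}
```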
...@@ -2558,8 +2558,8 @@ static void gen6_enable_rps(struct drm_device *dev) ...@@ -2558,8 +2558,8 @@ static void gen6_enable_rps(struct drm_device *dev)
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
/* In units of 100MHz */ /* In units of 50MHz */
dev_priv->rps.max_delay = rp_state_cap & 0xff; dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16; dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
dev_priv->rps.cur_delay = 0; dev_priv->rps.cur_delay = 0;
...@@ -2643,10 +2643,10 @@ static void gen6_enable_rps(struct drm_device *dev) ...@@ -2643,10 +2643,10 @@ static void gen6_enable_rps(struct drm_device *dev)
pcu_mbox = 0; pcu_mbox = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max from %dMHz to %dMHz\n", DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
(dev_priv->rps.max_delay & 0xff) * 50, (dev_priv->rps.max_delay & 0xff) * 50,
(pcu_mbox & 0xff) * 50); (pcu_mbox & 0xff) * 50);
dev_priv->rps.max_delay = pcu_mbox & 0xff; dev_priv->rps.hw_max = pcu_mbox & 0xff;
} }
} else { } else {
DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
...@@ -2684,8 +2684,8 @@ static void gen6_update_ring_freq(struct drm_device *dev) ...@@ -2684,8 +2684,8 @@ static void gen6_update_ring_freq(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15; int min_freq = 15;
int gpu_freq; unsigned int gpu_freq;
unsigned int ia_freq, max_ia_freq; unsigned int max_ia_freq, min_ring_freq;
int scaling_factor = 180; int scaling_factor = 180;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
...@@ -2701,6 +2701,10 @@ static void gen6_update_ring_freq(struct drm_device *dev) ...@@ -2701,6 +2701,10 @@ static void gen6_update_ring_freq(struct drm_device *dev)
/* Convert from kHz to MHz */ /* Convert from kHz to MHz */
max_ia_freq /= 1000; max_ia_freq /= 1000;
min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
/* convert DDR frequency from units of 133.3MHz to bandwidth */
min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
/* /*
* For each potential GPU frequency, load a ring frequency we'd like * For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency * to use for memory access. We do this by specifying the IA frequency
...@@ -2709,21 +2713,32 @@ static void gen6_update_ring_freq(struct drm_device *dev) ...@@ -2709,21 +2713,32 @@ static void gen6_update_ring_freq(struct drm_device *dev)
for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay; for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
gpu_freq--) { gpu_freq--) {
int diff = dev_priv->rps.max_delay - gpu_freq; int diff = dev_priv->rps.max_delay - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
		if (IS_HASWELL(dev)) {
			ring_freq = (gpu_freq * 5 + 3) / 4;
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}
sandybridge_pcode_write(dev_priv, sandybridge_pcode_write(dev_priv,
GEN6_PCODE_WRITE_MIN_FREQ_TABLE, GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
ia_freq | gpu_freq); ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
gpu_freq);
} }
} }
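To make the new Haswell ring-frequency path concrete: gpu_freq and ring_freq are ratios in 50 MHz units, the ring ratio is (gpu_freq * 5 + 3) / 4 (roughly 1.25x the GPU clock, rounded up), and it is clamped to a floor derived from the DDR clock via (2 * 4 * dclk + 2) / 3, which converts 133.3 MHz steps into 50 MHz steps (x 8/3, rounded). A stand-alone calculation with assumed ratio limits and an assumed DCLK read-out:

```c
#include <stdio.h>

/* Ratios are in 50 MHz units, as in gen6_update_ring_freq(). */
#define RATIO_TO_MHZ(r) ((r) * 50)

int main(void)
{
	unsigned int max_delay = 22, min_delay = 7; /* assumed RP limits: 1100..350 MHz */
	unsigned int dclk = 6;                      /* assumed DDR clock in 133.3 MHz units (~800 MHz) */

	/* 133.3 MHz steps -> 50 MHz steps: multiply by 8/3, rounding to nearest. */
	unsigned int min_ring_freq = (2 * 4 * dclk + 2) / 3;

	for (unsigned int gpu_freq = max_delay; gpu_freq >= min_delay; gpu_freq--) {
		unsigned int ring_freq = (gpu_freq * 5 + 3) / 4; /* ring runs ~1.25x the GPU clock */
		if (ring_freq < min_ring_freq)
			ring_freq = min_ring_freq;               /* never drop below the DDR-derived floor */
		printf("GPU %4u MHz -> ring %4u MHz\n",
		       RATIO_TO_MHZ(gpu_freq), RATIO_TO_MHZ(ring_freq));
	}
	return 0;
}
```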
...@@ -3575,6 +3590,7 @@ static void cpt_init_clock_gating(struct drm_device *dev) ...@@ -3575,6 +3590,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int pipe; int pipe;
uint32_t val;
/* /*
* On Ibex Peak and Cougar Point, we need to disable clock * On Ibex Peak and Cougar Point, we need to disable clock
...@@ -3587,8 +3603,17 @@ static void cpt_init_clock_gating(struct drm_device *dev) ...@@ -3587,8 +3603,17 @@ static void cpt_init_clock_gating(struct drm_device *dev)
/* The below fixes the weird display corruption, a few pixels shifted /* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY. * downward, on (only) LVDS of some HP laptops with IVY.
*/ */
for_each_pipe(pipe) for_each_pipe(pipe) {
I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE); val = I915_READ(TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
if (dev_priv->fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
I915_WRITE(TRANS_CHICKEN2(pipe), val);
}
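The loop above switches TRANS_CHICKEN2 from a blind write to read-modify-write so unrelated bits (notably the PBC bits mentioned in the merge summary) survive. A small self-contained sketch of that pattern; the masks and bit positions are placeholders, not the real TRANS_CHICKEN2_* values:

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit layout for illustration only. */
#define TIMING_OVERRIDE        (1u << 31)
#define FDI_POLARITY_REVERSED  (1u << 29)
#define UNRELATED_PBC_BITS     (0x3u << 0)   /* stands in for bits owned by the BIOS */

/* Read-modify-write: only the bits we own change; everything else that was
 * already programmed in the register is carried through untouched. */
static uint32_t update_chicken2(uint32_t val, int fdi_polarity_inverted)
{
	val |= TIMING_OVERRIDE;
	val &= ~FDI_POLARITY_REVERSED;
	if (fdi_polarity_inverted)
		val |= FDI_POLARITY_REVERSED;
	return val;
}

int main(void)
{
	uint32_t reg = UNRELATED_PBC_BITS;            /* pretend firmware left these set */
	reg = update_chicken2(reg, 1);
	printf("reg = %#x (PBC bits still %#x)\n",
	       (unsigned)reg, (unsigned)(reg & UNRELATED_PBC_BITS));
	return 0;
}
```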
/* WADP0ClockGatingDisable */ /* WADP0ClockGatingDisable */
for_each_pipe(pipe) { for_each_pipe(pipe) {
I915_WRITE(TRANS_CHICKEN1(pipe), I915_WRITE(TRANS_CHICKEN1(pipe),
...@@ -3890,7 +3915,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) ...@@ -3890,7 +3915,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
snpcr |= GEN6_MBC_SNPCR_MED; snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
cpt_init_clock_gating(dev); if (!HAS_PCH_NOP(dev))
cpt_init_clock_gating(dev);
gen6_check_mch_setup(dev); gen6_check_mch_setup(dev);
} }
...@@ -4084,6 +4110,22 @@ void intel_init_clock_gating(struct drm_device *dev) ...@@ -4084,6 +4110,22 @@ void intel_init_clock_gating(struct drm_device *dev)
dev_priv->display.init_clock_gating(dev); dev_priv->display.init_clock_gating(dev);
} }
/**
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
bool intel_using_power_well(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_HASWELL(dev))
return I915_READ(HSW_PWR_WELL_DRIVER) ==
(HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
else
return true;
}
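A stand-alone model of intel_using_power_well() above: the well only counts as usable when both the driver's request bit and the hardware status bit read back set, so callers can skip register accesses (such as the panel-fitter reads mentioned in the merge summary) while the well is down. The bit positions below are placeholders, not the real HSW_PWR_WELL_* values:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PWR_WELL_ENABLE (1u << 31)  /* placeholder: "driver asked for the well" */
#define PWR_WELL_STATE  (1u << 30)  /* placeholder: "hardware reports the well is up" */

static bool using_power_well(uint32_t pwr_well_driver_reg)
{
	/* Same shape as the check above: both request and status must be set. */
	return pwr_well_driver_reg == (PWR_WELL_ENABLE | PWR_WELL_STATE);
}

int main(void)
{
	printf("%d\n", using_power_well(PWR_WELL_ENABLE | PWR_WELL_STATE)); /* 1: safe to touch regs */
	printf("%d\n", using_power_well(PWR_WELL_ENABLE));                  /* 0: requested, not up yet */
	printf("%d\n", using_power_well(0));                                /* 0: well is off */
	return 0;
}
```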
void intel_set_power_well(struct drm_device *dev, bool enable) void intel_set_power_well(struct drm_device *dev, bool enable)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
...@@ -4190,7 +4232,6 @@ void intel_init_pm(struct drm_device *dev) ...@@ -4190,7 +4232,6 @@ void intel_init_pm(struct drm_device *dev)
} }
dev_priv->display.init_clock_gating = gen6_init_clock_gating; dev_priv->display.init_clock_gating = gen6_init_clock_gating;
} else if (IS_IVYBRIDGE(dev)) { } else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
if (SNB_READ_WM0_LATENCY()) { if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = ivybridge_update_wm; dev_priv->display.update_wm = ivybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
......
...@@ -1231,12 +1231,8 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) ...@@ -1231,12 +1231,8 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
struct intel_sdvo_connector *intel_sdvo_connector = struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(&connector->base); to_intel_sdvo_connector(&connector->base);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base); struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
struct drm_i915_private *dev_priv = intel_sdvo->base.base.dev->dev_private;
u16 active_outputs; u16 active_outputs;
if (!(I915_READ(intel_sdvo->sdvo_reg) & SDVO_ENABLE))
return false;
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
if (active_outputs & intel_sdvo_connector->output_flag) if (active_outputs & intel_sdvo_connector->output_flag)
...@@ -1251,11 +1247,13 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, ...@@ -1251,11 +1247,13 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev; struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
u16 active_outputs;
u32 tmp; u32 tmp;
tmp = I915_READ(intel_sdvo->sdvo_reg); tmp = I915_READ(intel_sdvo->sdvo_reg);
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
if (!(tmp & SDVO_ENABLE)) if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
return false; return false;
if (HAS_PCH_CPT(dev)) if (HAS_PCH_CPT(dev))
...@@ -2276,7 +2274,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) ...@@ -2276,7 +2274,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
connector = &intel_connector->base; connector = &intel_connector->base;
if (intel_sdvo_get_hotplug_support(intel_sdvo) & if (intel_sdvo_get_hotplug_support(intel_sdvo) &
intel_sdvo_connector->output_flag) { intel_sdvo_connector->output_flag) {
connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag; intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
/* Some SDVO devices have one-shot hotplug interrupts. /* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens. * Ensure that they get re-enabled when an interrupt happens.
...@@ -2284,7 +2281,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) ...@@ -2284,7 +2281,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_encoder->hot_plug = intel_sdvo_enable_hotplug; intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
intel_sdvo_enable_hotplug(intel_encoder); intel_sdvo_enable_hotplug(intel_encoder);
} else { } else {
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
} }
encoder->encoder_type = DRM_MODE_ENCODER_TMDS; encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID; connector->connector_type = DRM_MODE_CONNECTOR_DVID;
...@@ -2353,7 +2350,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) ...@@ -2353,7 +2350,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base; intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base; connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT; intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_DAC; encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA; connector->connector_type = DRM_MODE_CONNECTOR_VGA;
...@@ -2746,7 +2743,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) ...@@ -2746,7 +2743,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
struct intel_sdvo *intel_sdvo; struct intel_sdvo *intel_sdvo;
u32 hotplug_mask; u32 hotplug_mask;
int i; int i;
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
if (!intel_sdvo) if (!intel_sdvo)
return false; return false;
......
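On the relaxed SDVO state check above: the encoder now only reports "off" when the enable bit is clear and no outputs are active, so either signal alone is enough to treat it as running. A tiny sketch of that predicate with placeholder register values:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SDVO_ENABLE_BIT (1u << 31)  /* placeholder for the real SDVO_ENABLE bit */

/* Mirrors the relaxed check: active if either the control register says so
 * or the SDVO device still reports active outputs. */
static bool sdvo_hw_active(uint32_t sdvo_reg, uint16_t active_outputs)
{
	if (!(sdvo_reg & SDVO_ENABLE_BIT) && active_outputs == 0)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", sdvo_hw_active(SDVO_ENABLE_BIT, 0x0000)); /* 1: register enabled */
	printf("%d\n", sdvo_hw_active(0,               0x0001)); /* 1: outputs still active */
	printf("%d\n", sdvo_hw_active(0,               0x0000)); /* 0: truly off */
	return 0;
}
```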
...@@ -1613,7 +1613,7 @@ intel_tv_init(struct drm_device *dev) ...@@ -1613,7 +1613,7 @@ intel_tv_init(struct drm_device *dev)
* *
* More recent chipsets favour HDMI rather than integrated S-Video. * More recent chipsets favour HDMI rather than integrated S-Video.
*/ */
connector->polled = DRM_CONNECTOR_POLL_CONNECT; intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_init(dev, connector, &intel_tv_connector_funcs, drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO); DRM_MODE_CONNECTOR_SVIDEO);
......