Commit f15b4ca2 authored by Dave Airlie

Merge tag 'drm-intel-next-2012-05-20' of git://people.freedesktop.org/~danvet/drm-intel into drm-core-next

Daniel wrote:

The last pull I'd like to squeeze into 3.5; save for the hsw stuff it's
mostly bugfixes:
- last few patches for basic hsw enabling (Eugeni, infoframe support by
 Paulo)
- Fix up infoframe support, we've hopefully squashed all the cargo-culting
 in there (Paulo). Among all the issues, this finally fixes some of the
 infoframe regressions seen on g4x and snb systems.
- Fixup sdvo infoframe support, this fixes a regression from 2.6.37.
- Correctly enable semaphores on snb, we've enabled it already for 3.5,
 but the dmar check was slightly wrong.
- gen6 irq fixlets from Chris.
- disable gmbus on i830, the hw seems to be simply broken.
- fix up the pch pll fallout (Chris & me).
- for_each_ring macro from Chris - I figured I'd merge this now to
 avoid backport pain.
- complain when the rps state isn't what we expect (Chris). Note that this
 is shockingly easy to hit and hence will pretty much cause a regression
 report. But it only tells us that the gpu turbo state got out of whack,
 a problem we've known of for a long time (it causes the gpu to get stuck
 at a fixed frequency, usually the lowest one). Chris is working on a fix,
 but we haven't yet found a magic formula that works perfectly (only
 patches that massively reduce how often this happens).
- MAINTAINERS patch, I'm now officially the guy to beat up."

* tag 'drm-intel-next-2012-05-20' of git://people.freedesktop.org/~danvet/drm-intel: (57 commits)
  drm/i915: IBX has a fixed pch pll to pch pipe mapping
  drm/i915: implement hsw_write_infoframe
  drm/i915: small hdmi coding style cleanups
  drm/i915: fixup infoframe support for sdvo
  drm/i915: Enable the PCH PLL for all generations after link training
  drm/i915: Convert BUG_ON(!pll->active) and friends to a WARN
  drm/i915: don't clobber the pipe param in sanitize_modesetting
  drm/i915: disable gmbus on i830
  drm/i915: Replace the feature tests for BLT/BSD with ring init checks
  drm/i915: Check whether the ring is initialised prior to dispatch
  drm/i915: Introduce for_each_ring() macro
  drm/i915: Assert that the transcoder is indeed off before modifying it
  drm/i915: hook Haswell devices in place
  drm/i915: prepare HDMI link for Haswell
  drm/i915: move HDMI structs to shared location
  drm/i915: add WR PLL programming table
  drm/i915: add support for DDI-controlled digital outputs
  drm/i915: detect digital outputs on Haswell
  drm/i915: program iCLKIP on Lynx Point
  drm/i915: program WM_LINETIME on Haswell
  ...
parents 64172ccb 98b6bd99
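The for_each_ring() iterator mentioned in the pull text only visits rings that have actually been initialised, so callers no longer need explicit HAS_BSD()/HAS_BLT() checks. A minimal usage sketch, modelled on the i915_handle_error() conversion in the i915_irq.c diff below (the wrapper function name is hypothetical; the types are the driver's existing ones):

static void example_wake_all_rings(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i;

	/* Expands to a loop over I915_NUM_RINGS whose body only runs when
	 * intel_ring_initialized(ring) is true, i.e. ring->obj != NULL. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);
}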
...@@ -2373,10 +2373,10 @@ F: drivers/gpu/drm/ ...@@ -2373,10 +2373,10 @@ F: drivers/gpu/drm/
F: include/drm/ F: include/drm/
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Keith Packard <keithp@keithp.com> M: Daniel Vetter <daniel.vetter@ffwll.ch>
L: intel-gfx@lists.freedesktop.org (subscribers-only) L: intel-gfx@lists.freedesktop.org (subscribers-only)
L: dri-devel@lists.freedesktop.org L: dri-devel@lists.freedesktop.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux.git T: git git://people.freedesktop.org/~danvet/drm-intel
S: Supported S: Supported
F: drivers/gpu/drm/i915 F: drivers/gpu/drm/i915
F: include/drm/i915* F: include/drm/i915*
......
...@@ -908,6 +908,10 @@ static struct pci_device_id agp_intel_pci_table[] = { ...@@ -908,6 +908,10 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB), ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB), ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB), ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
ID(PCI_DEVICE_ID_INTEL_HASWELL_HB),
ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB),
ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB),
ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB),
{ } { }
}; };
......
...@@ -19,6 +19,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ ...@@ -19,6 +19,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_crt.o \ intel_crt.o \
intel_lvds.o \ intel_lvds.o \
intel_bios.o \ intel_bios.o \
intel_ddi.o \
intel_dp.o \ intel_dp.o \
intel_hdmi.o \ intel_hdmi.o \
intel_sdvo.o \ intel_sdvo.o \
......
...@@ -699,6 +699,7 @@ static int i915_error_state(struct seq_file *m, void *unused) ...@@ -699,6 +699,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
struct drm_device *dev = error_priv->dev; struct drm_device *dev = error_priv->dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error; struct drm_i915_error_state *error = error_priv->error;
struct intel_ring_buffer *ring;
int i, j, page, offset, elt; int i, j, page, offset, elt;
if (!error) { if (!error) {
...@@ -706,7 +707,6 @@ static int i915_error_state(struct seq_file *m, void *unused) ...@@ -706,7 +707,6 @@ static int i915_error_state(struct seq_file *m, void *unused)
return 0; return 0;
} }
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec); error->time.tv_usec);
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
...@@ -722,11 +722,8 @@ static int i915_error_state(struct seq_file *m, void *unused) ...@@ -722,11 +722,8 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
} }
i915_ring_error_state(m, dev, error, RCS); for_each_ring(ring, dev_priv, i)
if (HAS_BLT(dev)) i915_ring_error_state(m, dev, error, i);
i915_ring_error_state(m, dev, error, BCS);
if (HAS_BSD(dev))
i915_ring_error_state(m, dev, error, VCS);
if (error->active_bo) if (error->active_bo)
print_error_buffers(m, "Active", print_error_buffers(m, "Active",
......
...@@ -980,10 +980,10 @@ static int i915_getparam(struct drm_device *dev, void *data, ...@@ -980,10 +980,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1; value = 1;
break; break;
case I915_PARAM_HAS_BSD: case I915_PARAM_HAS_BSD:
value = HAS_BSD(dev); value = intel_ring_initialized(&dev_priv->ring[VCS]);
break; break;
case I915_PARAM_HAS_BLT: case I915_PARAM_HAS_BLT:
value = HAS_BLT(dev); value = intel_ring_initialized(&dev_priv->ring[BCS]);
break; break;
case I915_PARAM_HAS_RELAXED_FENCING: case I915_PARAM_HAS_RELAXED_FENCING:
value = 1; value = 1;
......
...@@ -345,6 +345,13 @@ static const struct pci_device_id pciidlist[] = { /* aka */ ...@@ -345,6 +345,13 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
{0, 0, 0} {0, 0, 0}
}; };
...@@ -407,9 +414,11 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) ...@@ -407,9 +414,11 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (i915_semaphores >= 0) if (i915_semaphores >= 0)
return i915_semaphores; return i915_semaphores;
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */ /* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6) if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
return !intel_iommu_enabled; return false;
#endif
return 1; return 1;
} }
...@@ -622,15 +631,16 @@ static int i915_drm_thaw(struct drm_device *dev) ...@@ -622,15 +631,16 @@ static int i915_drm_thaw(struct drm_device *dev)
/* KMS EnterVT equivalent */ /* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) { if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (HAS_PCH_SPLIT(dev))
ironlake_init_pch_refclk(dev);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0; dev_priv->mm.suspended = 0;
error = i915_gem_init_hw(dev); error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
if (HAS_PCH_SPLIT(dev)) intel_modeset_init_hw(dev);
ironlake_init_pch_refclk(dev);
drm_mode_config_reset(dev); drm_mode_config_reset(dev);
drm_irq_install(dev); drm_irq_install(dev);
...@@ -638,9 +648,6 @@ static int i915_drm_thaw(struct drm_device *dev) ...@@ -638,9 +648,6 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_lock(&dev->mode_config.mutex); mutex_lock(&dev->mode_config.mutex);
drm_helper_resume_force_mode(dev); drm_helper_resume_force_mode(dev);
mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&dev->mode_config.mutex);
if (IS_IRONLAKE_M(dev))
ironlake_enable_rc6(dev);
} }
intel_opregion_init(dev); intel_opregion_init(dev);
...@@ -886,15 +893,15 @@ int i915_reset(struct drm_device *dev) ...@@ -886,15 +893,15 @@ int i915_reset(struct drm_device *dev)
*/ */
if (drm_core_check_feature(dev, DRIVER_MODESET) || if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) { !dev_priv->mm.suspended) {
struct intel_ring_buffer *ring;
int i;
dev_priv->mm.suspended = 0; dev_priv->mm.suspended = 0;
i915_gem_init_swizzling(dev); i915_gem_init_swizzling(dev);
dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); for_each_ring(ring, dev_priv, i)
if (HAS_BSD(dev)) ring->init(ring);
dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
if (HAS_BLT(dev))
dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
i915_gem_init_ppgtt(dev); i915_gem_init_ppgtt(dev);
......
...@@ -243,6 +243,8 @@ struct drm_i915_display_funcs { ...@@ -243,6 +243,8 @@ struct drm_i915_display_funcs {
void (*update_sprite_wm)(struct drm_device *dev, int pipe, void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size); uint32_t sprite_width, int pixel_size);
void (*sanitize_pm)(struct drm_device *dev); void (*sanitize_pm)(struct drm_device *dev);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
int (*crtc_mode_set)(struct drm_crtc *crtc, int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, struct drm_display_mode *adjusted_mode,
...@@ -408,9 +410,7 @@ typedef struct drm_i915_private { ...@@ -408,9 +410,7 @@ typedef struct drm_i915_private {
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer; struct timer_list hangcheck_timer;
int hangcheck_count; int hangcheck_count;
uint32_t last_acthd; uint32_t last_acthd[I915_NUM_RINGS];
uint32_t last_acthd_bsd;
uint32_t last_acthd_blt;
uint32_t last_instdone; uint32_t last_instdone;
uint32_t last_instdone1; uint32_t last_instdone1;
...@@ -818,6 +818,11 @@ typedef struct drm_i915_private { ...@@ -818,6 +818,11 @@ typedef struct drm_i915_private {
struct drm_property *force_audio_property; struct drm_property *force_audio_property;
} drm_i915_private_t; } drm_i915_private_t;
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
enum hdmi_force_audio { enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
HDMI_AUDIO_OFF, /* force turn off HDMI audio */ HDMI_AUDIO_OFF, /* force turn off HDMI audio */
......
...@@ -1655,10 +1655,11 @@ void i915_gem_reset(struct drm_device *dev) ...@@ -1655,10 +1655,11 @@ void i915_gem_reset(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring;
int i; int i;
for (i = 0; i < I915_NUM_RINGS; i++) for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]); i915_gem_reset_ring_lists(dev_priv, ring);
/* Remove anything from the flushing lists. The GPU cache is likely /* Remove anything from the flushing lists. The GPU cache is likely
* to be lost on reset along with the data, so simply move the * to be lost on reset along with the data, so simply move the
...@@ -1763,10 +1764,11 @@ void ...@@ -1763,10 +1764,11 @@ void
i915_gem_retire_requests(struct drm_device *dev) i915_gem_retire_requests(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int i; int i;
for (i = 0; i < I915_NUM_RINGS; i++) for_each_ring(ring, dev_priv, i)
i915_gem_retire_requests_ring(&dev_priv->ring[i]); i915_gem_retire_requests_ring(ring);
} }
static void static void
...@@ -1774,6 +1776,7 @@ i915_gem_retire_work_handler(struct work_struct *work) ...@@ -1774,6 +1776,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
{ {
drm_i915_private_t *dev_priv; drm_i915_private_t *dev_priv;
struct drm_device *dev; struct drm_device *dev;
struct intel_ring_buffer *ring;
bool idle; bool idle;
int i; int i;
...@@ -1793,9 +1796,7 @@ i915_gem_retire_work_handler(struct work_struct *work) ...@@ -1793,9 +1796,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
* objects indefinitely. * objects indefinitely.
*/ */
idle = true; idle = true;
for (i = 0; i < I915_NUM_RINGS; i++) { for_each_ring(ring, dev_priv, i) {
struct intel_ring_buffer *ring = &dev_priv->ring[i];
if (!list_empty(&ring->gpu_write_list)) { if (!list_empty(&ring->gpu_write_list)) {
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
int ret; int ret;
...@@ -2137,13 +2138,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring) ...@@ -2137,13 +2138,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
int i915_gpu_idle(struct drm_device *dev) int i915_gpu_idle(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int ret, i; int ret, i;
/* Flush everything onto the inactive list. */ /* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) { for_each_ring(ring, dev_priv, i) {
ret = i915_ring_idle(&dev_priv->ring[i]); ret = i915_ring_idle(ring);
if (ret) if (ret)
return ret; return ret;
/* Is the device fubar? */
if (WARN_ON(!list_empty(&ring->gpu_write_list)))
return -EBUSY;
} }
return 0; return 0;
...@@ -3463,9 +3469,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev) ...@@ -3463,9 +3469,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
/* GFX_MODE is per-ring on gen7+ */ /* GFX_MODE is per-ring on gen7+ */
} }
for (i = 0; i < I915_NUM_RINGS; i++) { for_each_ring(ring, dev_priv, i) {
ring = &dev_priv->ring[i];
if (INTEL_INFO(dev)->gen >= 7) if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring), I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
...@@ -3581,10 +3585,11 @@ void ...@@ -3581,10 +3585,11 @@ void
i915_gem_cleanup_ringbuffer(struct drm_device *dev) i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int i; int i;
for (i = 0; i < I915_NUM_RINGS; i++) for_each_ring(ring, dev_priv, i)
intel_cleanup_ring_buffer(&dev_priv->ring[i]); intel_cleanup_ring_buffer(ring);
} }
int int
...@@ -3592,7 +3597,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, ...@@ -3592,7 +3597,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i; int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET)) if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0; return 0;
...@@ -3614,10 +3619,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, ...@@ -3614,10 +3619,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
BUG_ON(!list_empty(&dev_priv->mm.active_list)); BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
for (i = 0; i < I915_NUM_RINGS; i++) {
BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
}
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev); ret = drm_irq_install(dev);
......
...@@ -168,7 +168,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) ...@@ -168,7 +168,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next; struct drm_i915_gem_object *obj, *next;
bool lists_empty; bool lists_empty;
int ret,i; int ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) && lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->mm.flushing_list) &&
...@@ -178,17 +178,13 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) ...@@ -178,17 +178,13 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
trace_i915_gem_evict_everything(dev, purgeable_only); trace_i915_gem_evict_everything(dev, purgeable_only);
ret = i915_gpu_idle(dev);
if (ret)
return ret;
/* The gpu_idle will flush everything in the write domain to the /* The gpu_idle will flush everything in the write domain to the
* active list. Then we must move everything off the active list * active list. Then we must move everything off the active list
* with retire requests. * with retire requests.
*/ */
for (i = 0; i < I915_NUM_RINGS; i++) ret = i915_gpu_idle(dev);
if (WARN_ON(!list_empty(&dev_priv->ring[i].gpu_write_list))) if (ret)
return -EBUSY; return ret;
i915_gem_retire_requests(dev); i915_gem_retire_requests(dev);
...@@ -203,5 +199,5 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) ...@@ -203,5 +199,5 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
} }
} }
return ret; return 0;
} }
...@@ -967,11 +967,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, ...@@ -967,11 +967,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->pending_gpu_write = true; obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list, list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list); &ring->gpu_write_list);
if (obj->pin_count) /* check for potential scanout */
intel_mark_busy(ring->dev, obj); intel_mark_busy(ring->dev, obj);
} }
trace_i915_gem_object_change_domain(obj, old_read, old_write); trace_i915_gem_object_change_domain(obj, old_read, old_write);
} }
intel_mark_busy(ring->dev, NULL);
} }
static void static void
...@@ -1061,17 +1064,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1061,17 +1064,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ring = &dev_priv->ring[RCS]; ring = &dev_priv->ring[RCS];
break; break;
case I915_EXEC_BSD: case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
DRM_DEBUG("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->ring[VCS]; ring = &dev_priv->ring[VCS];
break; break;
case I915_EXEC_BLT: case I915_EXEC_BLT:
if (!HAS_BLT(dev)) {
DRM_DEBUG("execbuf with invalid ring (BLT)\n");
return -EINVAL;
}
ring = &dev_priv->ring[BCS]; ring = &dev_priv->ring[BCS];
break; break;
default: default:
...@@ -1079,6 +1074,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1079,6 +1074,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
(int)(args->flags & I915_EXEC_RING_MASK)); (int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL; return -EINVAL;
} }
if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK; mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK; mask = I915_EXEC_CONSTANTS_MASK;
......
...@@ -533,14 +533,11 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) ...@@ -533,14 +533,11 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
return ret; return ret;
} }
static void pch_irq_handler(struct drm_device *dev) static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
{ {
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 pch_iir;
int pipe; int pipe;
pch_iir = I915_READ(SDEIIR);
if (pch_iir & SDE_AUDIO_POWER_MASK) if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >> (pch_iir & SDE_AUDIO_POWER_MASK) >>
...@@ -580,72 +577,61 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) ...@@ -580,72 +577,61 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{ {
struct drm_device *dev = (struct drm_device *) arg; struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE; u32 de_iir, gt_iir, de_ier, pm_iir;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; irqreturn_t ret = IRQ_NONE;
int i;
atomic_inc(&dev_priv->irq_received); atomic_inc(&dev_priv->irq_received);
/* disable master interrupt before clearing iir */ /* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER); de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
POSTING_READ(DEIER);
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR); gt_iir = I915_READ(GTIIR);
pch_iir = I915_READ(SDEIIR); if (gt_iir) {
pm_iir = I915_READ(GEN6_PMIIR);
if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
goto done;
ret = IRQ_HANDLED;
snb_gt_irq_handler(dev, dev_priv, gt_iir); snb_gt_irq_handler(dev, dev_priv, gt_iir);
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
}
de_iir = I915_READ(DEIIR);
if (de_iir) {
if (de_iir & DE_GSE_IVB) if (de_iir & DE_GSE_IVB)
intel_opregion_gse_intr(dev); intel_opregion_gse_intr(dev);
if (de_iir & DE_PLANEA_FLIP_DONE_IVB) { for (i = 0; i < 3; i++) {
intel_prepare_page_flip(dev, 0); if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
intel_finish_page_flip_plane(dev, 0); intel_prepare_page_flip(dev, i);
intel_finish_page_flip_plane(dev, i);
} }
if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
if (de_iir & DE_PLANEB_FLIP_DONE_IVB) { drm_handle_vblank(dev, i);
intel_prepare_page_flip(dev, 1);
intel_finish_page_flip_plane(dev, 1);
} }
if (de_iir & DE_PLANEC_FLIP_DONE_IVB) {
intel_prepare_page_flip(dev, 2);
intel_finish_page_flip_plane(dev, 2);
}
if (de_iir & DE_PIPEA_VBLANK_IVB)
drm_handle_vblank(dev, 0);
if (de_iir & DE_PIPEB_VBLANK_IVB)
drm_handle_vblank(dev, 1);
if (de_iir & DE_PIPEC_VBLANK_IVB)
drm_handle_vblank(dev, 2);
/* check event from PCH */ /* check event from PCH */
if (de_iir & DE_PCH_EVENT_IVB) { if (de_iir & DE_PCH_EVENT_IVB) {
u32 pch_iir = I915_READ(SDEIIR);
if (pch_iir & SDE_HOTPLUG_MASK_CPT) if (pch_iir & SDE_HOTPLUG_MASK_CPT)
queue_work(dev_priv->wq, &dev_priv->hotplug_work); queue_work(dev_priv->wq, &dev_priv->hotplug_work);
pch_irq_handler(dev); pch_irq_handler(dev, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
}
I915_WRITE(DEIIR, de_iir);
ret = IRQ_HANDLED;
} }
pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir) {
if (pm_iir & GEN6_PM_DEFERRED_EVENTS) if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir); gen6_queue_rps_work(dev_priv, pm_iir);
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(DEIIR, de_iir);
I915_WRITE(GEN6_PMIIR, pm_iir); I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
}
done:
I915_WRITE(DEIER, de_ier); I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER); POSTING_READ(DEIER);
...@@ -721,7 +707,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) ...@@ -721,7 +707,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_PCH_EVENT) { if (de_iir & DE_PCH_EVENT) {
if (pch_iir & hotplug_mask) if (pch_iir & hotplug_mask)
queue_work(dev_priv->wq, &dev_priv->hotplug_work); queue_work(dev_priv->wq, &dev_priv->hotplug_work);
pch_irq_handler(dev); pch_irq_handler(dev, pch_iir);
} }
if (de_iir & DE_PCU_EVENT) { if (de_iir & DE_PCU_EVENT) {
...@@ -1036,15 +1022,11 @@ static void i915_gem_record_rings(struct drm_device *dev, ...@@ -1036,15 +1022,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error) struct drm_i915_error_state *error)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
int i, count; int i, count;
for (i = 0; i < I915_NUM_RINGS; i++) { for_each_ring(ring, dev_priv, i) {
struct intel_ring_buffer *ring = &dev_priv->ring[i];
if (ring->obj == NULL)
continue;
i915_record_ring_state(dev, error, ring); i915_record_ring_state(dev, error, ring);
error->ring[i].batchbuffer = error->ring[i].batchbuffer =
...@@ -1309,6 +1291,8 @@ static void i915_report_and_clear_eir(struct drm_device *dev) ...@@ -1309,6 +1291,8 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
void i915_handle_error(struct drm_device *dev, bool wedged) void i915_handle_error(struct drm_device *dev, bool wedged)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int i;
i915_capture_error_state(dev); i915_capture_error_state(dev);
i915_report_and_clear_eir(dev); i915_report_and_clear_eir(dev);
...@@ -1320,11 +1304,8 @@ void i915_handle_error(struct drm_device *dev, bool wedged) ...@@ -1320,11 +1304,8 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
/* /*
* Wakeup waiting processes so they don't hang * Wakeup waiting processes so they don't hang
*/ */
wake_up_all(&dev_priv->ring[RCS].irq_queue); for_each_ring(ring, dev_priv, i)
if (HAS_BSD(dev)) wake_up_all(&ring->irq_queue);
wake_up_all(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
wake_up_all(&dev_priv->ring[BCS].irq_queue);
} }
queue_work(dev_priv->wq, &dev_priv->error_work); queue_work(dev_priv->wq, &dev_priv->error_work);
...@@ -1529,11 +1510,6 @@ ring_last_seqno(struct intel_ring_buffer *ring) ...@@ -1529,11 +1510,6 @@ ring_last_seqno(struct intel_ring_buffer *ring)
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{ {
/* We don't check whether the ring even exists before calling this
* function. Hence check whether it's initialized. */
if (ring->obj == NULL)
return true;
if (list_empty(&ring->request_list) || if (list_empty(&ring->request_list) ||
i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
/* Issue a wake-up to catch stuck h/w. */ /* Issue a wake-up to catch stuck h/w. */
...@@ -1567,26 +1543,25 @@ static bool i915_hangcheck_hung(struct drm_device *dev) ...@@ -1567,26 +1543,25 @@ static bool i915_hangcheck_hung(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
if (dev_priv->hangcheck_count++ > 1) { if (dev_priv->hangcheck_count++ > 1) {
bool hung = true;
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
i915_handle_error(dev, true); i915_handle_error(dev, true);
if (!IS_GEN2(dev)) { if (!IS_GEN2(dev)) {
struct intel_ring_buffer *ring;
int i;
/* Is the chip hanging on a WAIT_FOR_EVENT? /* Is the chip hanging on a WAIT_FOR_EVENT?
* If so we can simply poke the RB_WAIT bit * If so we can simply poke the RB_WAIT bit
* and break the hang. This should work on * and break the hang. This should work on
* all but the second generation chipsets. * all but the second generation chipsets.
*/ */
if (kick_ring(&dev_priv->ring[RCS])) for_each_ring(ring, dev_priv, i)
return false; hung &= !kick_ring(ring);
if (HAS_BSD(dev) && kick_ring(&dev_priv->ring[VCS]))
return false;
if (HAS_BLT(dev) && kick_ring(&dev_priv->ring[BCS]))
return false;
} }
return true; return hung;
} }
return false; return false;
...@@ -1602,16 +1577,23 @@ void i915_hangcheck_elapsed(unsigned long data) ...@@ -1602,16 +1577,23 @@ void i915_hangcheck_elapsed(unsigned long data)
{ {
struct drm_device *dev = (struct drm_device *)data; struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt; uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
bool err = false; struct intel_ring_buffer *ring;
bool err = false, idle;
int i;
if (!i915_enable_hangcheck) if (!i915_enable_hangcheck)
return; return;
memset(acthd, 0, sizeof(acthd));
idle = true;
for_each_ring(ring, dev_priv, i) {
idle &= i915_hangcheck_ring_idle(ring, &err);
acthd[i] = intel_ring_get_active_head(ring);
}
/* If all work is done then ACTHD clearly hasn't advanced. */ /* If all work is done then ACTHD clearly hasn't advanced. */
if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && if (idle) {
i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
if (err) { if (err) {
if (i915_hangcheck_hung(dev)) if (i915_hangcheck_hung(dev))
return; return;
...@@ -1630,15 +1612,8 @@ void i915_hangcheck_elapsed(unsigned long data) ...@@ -1630,15 +1612,8 @@ void i915_hangcheck_elapsed(unsigned long data)
instdone = I915_READ(INSTDONE_I965); instdone = I915_READ(INSTDONE_I965);
instdone1 = I915_READ(INSTDONE1); instdone1 = I915_READ(INSTDONE1);
} }
acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
acthd_bsd = HAS_BSD(dev) ?
intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
acthd_blt = HAS_BLT(dev) ?
intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
if (dev_priv->last_acthd == acthd && if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
dev_priv->last_acthd_bsd == acthd_bsd &&
dev_priv->last_acthd_blt == acthd_blt &&
dev_priv->last_instdone == instdone && dev_priv->last_instdone == instdone &&
dev_priv->last_instdone1 == instdone1) { dev_priv->last_instdone1 == instdone1) {
if (i915_hangcheck_hung(dev)) if (i915_hangcheck_hung(dev))
...@@ -1646,9 +1621,7 @@ void i915_hangcheck_elapsed(unsigned long data) ...@@ -1646,9 +1621,7 @@ void i915_hangcheck_elapsed(unsigned long data)
} else { } else {
dev_priv->hangcheck_count = 0; dev_priv->hangcheck_count = 0;
dev_priv->last_acthd = acthd; memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
dev_priv->last_acthd_bsd = acthd_bsd;
dev_priv->last_acthd_blt = acthd_blt;
dev_priv->last_instdone = instdone; dev_priv->last_instdone = instdone;
dev_priv->last_instdone1 = instdone1; dev_priv->last_instdone1 = instdone1;
} }
...@@ -2597,8 +2570,7 @@ void intel_irq_init(struct drm_device *dev) ...@@ -2597,8 +2570,7 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev) || if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
IS_VALLEYVIEW(dev)) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter; dev->driver->get_vblank_counter = gm45_get_vblank_counter;
} }
...@@ -2624,6 +2596,14 @@ void intel_irq_init(struct drm_device *dev) ...@@ -2624,6 +2596,14 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = ironlake_irq_uninstall; dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank; dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank; dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (IS_HASWELL(dev)) {
/* Share interrupts handling with IVB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ivybridge_irq_postinstall;
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (HAS_PCH_SPLIT(dev)) { } else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler; dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_preinstall = ironlake_irq_preinstall;
......
...@@ -1697,9 +1697,12 @@ ...@@ -1697,9 +1697,12 @@
/* Video Data Island Packet control */ /* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178 #define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170 #define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31) #define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT_B (1 << 29) #define VIDEO_DIP_PORT_B (1 << 29)
#define VIDEO_DIP_PORT_C (2 << 29) #define VIDEO_DIP_PORT_C (2 << 29)
#define VIDEO_DIP_PORT_D (3 << 29)
#define VIDEO_DIP_PORT_MASK (3 << 29)
#define VIDEO_DIP_ENABLE_AVI (1 << 21) #define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21) #define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
#define VIDEO_DIP_ENABLE_SPD (8 << 21) #define VIDEO_DIP_ENABLE_SPD (8 << 21)
...@@ -1710,6 +1713,10 @@ ...@@ -1710,6 +1713,10 @@
#define VIDEO_DIP_FREQ_ONCE (0 << 16) #define VIDEO_DIP_FREQ_ONCE (0 << 16)
#define VIDEO_DIP_FREQ_VSYNC (1 << 16) #define VIDEO_DIP_FREQ_VSYNC (1 << 16)
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16) #define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
#define VIDEO_DIP_FREQ_MASK (3 << 16)
/* HSW and later: */
#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
/* Panel power sequencing */ /* Panel power sequencing */
#define PP_STATUS 0x61200 #define PP_STATUS 0x61200
...@@ -2476,7 +2483,8 @@ ...@@ -2476,7 +2483,8 @@
/* Pipe A */ /* Pipe A */
#define _PIPEADSL 0x70000 #define _PIPEADSL 0x70000
#define DSL_LINEMASK 0x00000fff #define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008 #define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31) #define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0 #define PIPECONF_DISABLE 0
...@@ -3520,6 +3528,42 @@ ...@@ -3520,6 +3528,42 @@
#define VLV_TVIDEO_DIP_GCP(pipe) \ #define VLV_TVIDEO_DIP_GCP(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B) _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
/* Haswell DIP controls */
#define HSW_VIDEO_DIP_CTL_A 0x60200
#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
#define HSW_VIDEO_DIP_GCP_A 0x60210
#define HSW_VIDEO_DIP_CTL_B 0x61200
#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
#define HSW_VIDEO_DIP_GCP_B 0x61210
#define HSW_TVIDEO_DIP_CTL(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
#define HSW_TVIDEO_DIP_GCP(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
#define _TRANS_HTOTAL_B 0xe1000 #define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004 #define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008 #define _TRANS_HSYNC_B 0xe1008
......
...@@ -876,12 +876,6 @@ int i915_restore_state(struct drm_device *dev) ...@@ -876,12 +876,6 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(IER, dev_priv->saveIER); I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR); I915_WRITE(IMR, dev_priv->saveIMR);
} }
mutex_unlock(&dev->struct_mutex);
if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_modeset_init_hw(dev);
mutex_lock(&dev->struct_mutex);
/* Cache mode state */ /* Cache mode state */
I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
......
...@@ -615,7 +615,11 @@ void intel_crt_init(struct drm_device *dev) ...@@ -615,7 +615,11 @@ void intel_crt_init(struct drm_device *dev)
crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
1 << INTEL_ANALOG_CLONE_BIT | 1 << INTEL_ANALOG_CLONE_BIT |
1 << INTEL_SDVO_LVDS_CLONE_BIT); 1 << INTEL_SDVO_LVDS_CLONE_BIT);
if (IS_HASWELL(dev))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1); crt->base.crtc_mask = (1 << 0) | (1 << 1);
if (IS_GEN2(dev)) if (IS_GEN2(dev))
connector->interlace_allowed = 0; connector->interlace_allowed = 0;
else else
......
...@@ -280,16 +280,29 @@ struct dip_infoframe { ...@@ -280,16 +280,29 @@ struct dip_infoframe {
uint16_t bottom_bar_start; uint16_t bottom_bar_start;
uint16_t left_bar_end; uint16_t left_bar_end;
uint16_t right_bar_start; uint16_t right_bar_start;
} avi; } __attribute__ ((packed)) avi;
struct { struct {
uint8_t vn[8]; uint8_t vn[8];
uint8_t pd[16]; uint8_t pd[16];
uint8_t sdi; uint8_t sdi;
} spd; } __attribute__ ((packed)) spd;
uint8_t payload[27]; uint8_t payload[27];
} __attribute__ ((packed)) body; } __attribute__ ((packed)) body;
} __attribute__((packed)); } __attribute__((packed));
struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
int ddi_port;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
enum hdmi_force_audio force_audio;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
};
static inline struct drm_crtc * static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{ {
...@@ -329,7 +342,11 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector) ...@@ -329,7 +342,11 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev); extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob); bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev); extern void intel_dvo_init(struct drm_device *dev);
...@@ -446,12 +463,17 @@ extern void intel_init_clock_gating(struct drm_device *dev); ...@@ -446,12 +463,17 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder, extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode); struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe); extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
extern void intel_prepare_ddi(struct drm_device *dev);
extern void hsw_fdi_link_train(struct drm_crtc *crtc);
extern void intel_ddi_init(struct drm_device *dev, enum port port);
/* For use by IVB LP watermark workaround in intel_sprite.c */ /* For use by IVB LP watermark workaround in intel_sprite.c */
extern void intel_update_watermarks(struct drm_device *dev); extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width, uint32_t sprite_width,
int pixel_size); int pixel_size);
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
...@@ -475,4 +497,9 @@ extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv); ...@@ -475,4 +497,9 @@ extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
extern void gen6_disable_rps(struct drm_device *dev); extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev); extern void intel_init_emon(struct drm_device *dev);
extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
extern void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
#endif /* __INTEL_DRV_H__ */ #endif /* __INTEL_DRV_H__ */
...@@ -490,6 +490,10 @@ int intel_setup_gmbus(struct drm_device *dev) ...@@ -490,6 +490,10 @@ int intel_setup_gmbus(struct drm_device *dev)
/* By default use a conservative clock rate */ /* By default use a conservative clock rate */
bus->reg0 = port | GMBUS_RATE_100KHZ; bus->reg0 = port | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
if (IS_I830(dev))
bus->force_bit = true;
intel_gpio_setup(bus, port); intel_gpio_setup(bus, port);
} }
......
...@@ -1803,8 +1803,7 @@ static void sandybridge_update_wm(struct drm_device *dev) ...@@ -1803,8 +1803,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
enabled |= 2; enabled |= 2;
} }
/* IVB has 3 pipes */ if ((dev_priv->num_pipe == 3) &&
if (IS_IVYBRIDGE(dev) &&
g4x_compute_wm0(dev, 2, g4x_compute_wm0(dev, 2,
&sandybridge_display_wm_info, latency, &sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency, &sandybridge_cursor_wm_info, latency,
...@@ -1884,6 +1883,33 @@ static void sandybridge_update_wm(struct drm_device *dev) ...@@ -1884,6 +1883,33 @@ static void sandybridge_update_wm(struct drm_device *dev)
cursor_wm); cursor_wm);
} }
static void
haswell_update_linetime_wm(struct drm_device *dev, int pipe,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp;
temp = I915_READ(PIPE_WM_LINETIME(pipe));
temp &= ~PIPE_WM_LINETIME_MASK;
/* The WM are computed with base on how long it takes to fill a single
* row at the given clock rate, multiplied by 8.
* */
temp |= PIPE_WM_LINETIME_TIME(
((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
/* IPS watermarks are only used by pipe A, and are ignored by
* pipes B and C. They are calculated similarly to the common
* linetime values, except that we are using CD clock frequency
* in MHz instead of pixel rate for the division.
*
* This is a placeholder for the IPS watermark calculation code.
*/
I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
}
static bool static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
uint32_t sprite_width, int pixel_size, uint32_t sprite_width, int pixel_size,
...@@ -2079,6 +2105,15 @@ void intel_update_watermarks(struct drm_device *dev) ...@@ -2079,6 +2105,15 @@ void intel_update_watermarks(struct drm_device *dev)
dev_priv->display.update_wm(dev); dev_priv->display.update_wm(dev);
} }
void intel_update_linetime_watermarks(struct drm_device *dev,
int pipe, struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->display.update_linetime_wm)
dev_priv->display.update_linetime_wm(dev, pipe, mode);
}
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size) uint32_t sprite_width, int pixel_size)
{ {
...@@ -2291,6 +2326,7 @@ int intel_enable_rc6(const struct drm_device *dev) ...@@ -2291,6 +2326,7 @@ int intel_enable_rc6(const struct drm_device *dev)
void gen6_enable_rps(struct drm_i915_private *dev_priv) void gen6_enable_rps(struct drm_i915_private *dev_priv)
{ {
struct intel_ring_buffer *ring;
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 pcu_mbox, rc6_mask = 0; u32 pcu_mbox, rc6_mask = 0;
...@@ -2325,8 +2361,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) ...@@ -2325,8 +2361,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
for (i = 0; i < I915_NUM_RINGS; i++) for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
...@@ -3560,6 +3596,37 @@ void intel_sanitize_pm(struct drm_device *dev) ...@@ -3560,6 +3596,37 @@ void intel_sanitize_pm(struct drm_device *dev)
dev_priv->display.sanitize_pm(dev); dev_priv->display.sanitize_pm(dev);
} }
/* Starting with Haswell, we have different power wells for
* different parts of the GPU. This attempts to enable them all.
*/
void intel_init_power_wells(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long power_wells[] = {
HSW_PWR_WELL_CTL1,
HSW_PWR_WELL_CTL2,
HSW_PWR_WELL_CTL4
};
int i;
if (!IS_HASWELL(dev))
return;
mutex_lock(&dev->struct_mutex);
for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
int well = I915_READ(power_wells[i]);
if ((well & HSW_PWR_WELL_STATE) == 0) {
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20))
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
}
}
mutex_unlock(&dev->struct_mutex);
}
/* Set up chip specific power management-related functions */ /* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev) void intel_init_pm(struct drm_device *dev)
{ {
...@@ -3655,6 +3722,18 @@ void intel_init_pm(struct drm_device *dev) ...@@ -3655,6 +3722,18 @@ void intel_init_pm(struct drm_device *dev)
} }
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
dev_priv->display.sanitize_pm = gen6_sanitize_pm; dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else if (IS_HASWELL(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else } else
dev_priv->display.update_wm = NULL; dev_priv->display.update_wm = NULL;
} else if (IS_VALLEYVIEW(dev)) { } else if (IS_VALLEYVIEW(dev)) {
...@@ -3708,5 +3787,10 @@ void intel_init_pm(struct drm_device *dev) ...@@ -3708,5 +3787,10 @@ void intel_init_pm(struct drm_device *dev)
else else
dev_priv->display.get_fifo_size = i830_get_fifo_size; dev_priv->display.get_fifo_size = i830_get_fifo_size;
} }
/* We attempt to init the necessary power wells early in the initialization
* time, so the subsystems that expect power to be enabled can work.
*/
intel_init_power_wells(dev);
} }
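The haswell_update_linetime_wm() hunk above computes the line-time watermark from the mode timings. As a worked example with illustrative numbers (not taken from the patch): a mode with crtc_hdisplay = 1920 and a 148500 kHz pixel clock gives (1920 * 1000) / 148500 = 12 µs per line after the integer division, and 12 * 8 = 96 is what would land in the PIPE_WM_LINETIME time field, i.e. the line time in the eighth-of-a-microsecond units implied by the multiply-by-8 (the untruncated value is about 12.9 µs).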
...@@ -119,6 +119,12 @@ struct intel_ring_buffer { ...@@ -119,6 +119,12 @@ struct intel_ring_buffer {
void *private; void *private;
}; };
static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
return ring->obj != NULL;
}
static inline unsigned static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring) intel_ring_flag(struct intel_ring_buffer *ring)
{ {
......
...@@ -887,17 +887,24 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) ...@@ -887,17 +887,24 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
}; };
uint8_t tx_rate = SDVO_HBUF_TX_VSYNC; uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
uint8_t set_buf_index[2] = { 1, 0 }; uint8_t set_buf_index[2] = { 1, 0 };
uint64_t *data = (uint64_t *)&avi_if; uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
uint64_t *data = (uint64_t *)sdvo_data;
unsigned i; unsigned i;
intel_dip_infoframe_csum(&avi_if); intel_dip_infoframe_csum(&avi_if);
/* sdvo spec says that the ecc is handled by the hw, and it looks like
* we must not send the ecc field, either. */
memcpy(sdvo_data, &avi_if, 3);
sdvo_data[3] = avi_if.checksum;
memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
if (!intel_sdvo_set_value(intel_sdvo, if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_INDEX, SDVO_CMD_SET_HBUF_INDEX,
set_buf_index, 2)) set_buf_index, 2))
return false; return false;
for (i = 0; i < sizeof(avi_if); i += 8) { for (i = 0; i < sizeof(sdvo_data); i += 8) {
if (!intel_sdvo_set_value(intel_sdvo, if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_DATA, SDVO_CMD_SET_HBUF_DATA,
data, 8)) data, 8))
......