Commit 4662db44 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/i915: Fix unintended recursion in ironlake_disable_rc6
  drm/i915: fix corruptions on i8xx due to relaxed fencing
  drm/i915: skip FDI & PCH enabling for DP_A
  agp/intel: Experiment with a 855GM GWB bit
  drm/i915: don't enable FDI & transcoder interrupts after all
  drm/i915: Ignore a hung GPU when flushing the framebuffer prior to a switch
parents 86e2fe9f 3c0556e9
@@ -130,6 +130,7 @@
 #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
 
 #define I915_IFPADDR    0x60
+#define I830_HIC        0x70
 
 /* Intel 965G registers */
 #define I965_MSAC 0x62
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/agp_backend.h>
+#include <linux/delay.h>
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
@@ -70,12 +71,8 @@ static struct _intel_private {
         u32 __iomem *gtt;       /* I915G */
         bool clear_fake_agp; /* on first access via agp, fill with scratch */
         int num_dcache_entries;
-        union {
-                void __iomem *i9xx_flush_page;
-                void *i8xx_flush_page;
-        };
+        void __iomem *i9xx_flush_page;
         char *i81x_gtt_table;
-        struct page *i8xx_page;
         struct resource ifp_resource;
         int resource_valid;
         struct page *scratch_page;
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
 
 static void i830_cleanup(void)
 {
-        if (intel_private.i8xx_flush_page) {
-                kunmap(intel_private.i8xx_flush_page);
-                intel_private.i8xx_flush_page = NULL;
-        }
-
-        __free_page(intel_private.i8xx_page);
-        intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
-        /* return if we've already set the flush mechanism up */
-        if (intel_private.i8xx_page)
-                return;
-
-        intel_private.i8xx_page = alloc_page(GFP_KERNEL);
-        if (!intel_private.i8xx_page)
-                return;
-
-        intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
-        if (!intel_private.i8xx_flush_page)
-                i830_cleanup();
 }
 
 /* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
  */
 static void i830_chipset_flush(void)
 {
-        unsigned int *pg = intel_private.i8xx_flush_page;
-
-        memset(pg, 0, 1024);
-
-        if (cpu_has_clflush)
-                clflush_cache_range(pg, 1024);
-        else if (wbinvd_on_all_cpus() != 0)
-                printk(KERN_ERR "Timed out waiting for cache flush.\n");
+        unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+        /* Forcibly evict everything from the CPU write buffers.
+         * clflush appears to be insufficient.
+         */
+        wbinvd_on_all_cpus();
+
+        /* Now we've only seen documents for this magic bit on 855GM,
+         * we hope it exists for the other gen2 chipsets...
+         *
+         * Also works as advertised on my 845G.
+         */
+        writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+               intel_private.registers+I830_HIC);
+
+        while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+                if (time_after(jiffies, timeout))
+                        break;
+
+                udelay(50);
+        }
 }
 
 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -849,8 +837,6 @@ static int i830_setup(void)
 
         intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
 
-        intel_i830_setup_flush();
-
         return 0;
 }
 
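The rewritten i830_chipset_flush() above is essentially a bounded register poll: set a (hopefully) self-clearing trigger bit, then spin on it with a hard deadline so an unresponsive chipset cannot hang the machine. A minimal user-space sketch of that pattern, with hypothetical mmio_read32()/mmio_write32() accessors standing in for readl()/writel() and a monotonic clock in place of jiffies, time_after() and udelay():

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define FLUSH_TRIGGER (1u << 31)   /* self-clearing trigger bit, like bit 31 of I830_HIC */

/* Hypothetical MMIO accessors backed by a plain variable; a real driver
 * would use readl()/writel() on an ioremap()'d register instead. */
static volatile uint32_t fake_reg;
static uint32_t mmio_read32(void)        { return fake_reg; }
static void     mmio_write32(uint32_t v) { fake_reg = v; }

static uint64_t now_us(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000u + (uint64_t)ts.tv_nsec / 1000u;
}

/* Kick the flush, then poll until the bit clears, giving up after
 * timeout_us (the diff uses a 1 s timeout with 50 us delays). */
static bool flush_with_timeout(uint64_t timeout_us)
{
        uint64_t deadline = now_us() + timeout_us;
        struct timespec delay = { 0, 50 * 1000 };   /* roughly udelay(50) */

        mmio_write32(mmio_read32() | FLUSH_TRIGGER);
        while (mmio_read32() & FLUSH_TRIGGER) {
                if (now_us() > deadline)
                        return false;   /* bail out instead of spinning forever */
                nanosleep(&delay, NULL);
        }
        return true;
}

int main(void)
{
        /* Nothing ever clears fake_reg here, so this exercises the timeout
         * path: the poll gives up after ~2 ms instead of hanging. */
        return flush_with_timeout(2000) ? 0 : 1;
}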
@@ -184,7 +184,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 static bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 {
-        int tile_width;
+        int tile_width, tile_height;
 
         /* Linear is always fine */
         if (tiling_mode == I915_TILING_NONE)
@@ -215,6 +215,20 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
                 }
         }
 
+        if (IS_GEN2(dev) ||
+            (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+                tile_height = 32;
+        else
+                tile_height = 8;
+
+        /* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even
+         * number of tile rows. */
+        if (IS_GEN2(dev))
+                tile_height *= 2;
+
+        /* Size needs to be aligned to a full tile row */
+        if (size & (tile_height * stride - 1))
+                return false;
 
         /* 965+ just needs multiples of tile width */
         if (INTEL_INFO(dev)->gen >= 4) {
                 if (stride & (tile_width - 1))
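The i915_tiling_ok() hunk above is plain alignment arithmetic: pick the tile height for the chipset and tiling mode, double it on i8xx because tile rows are interleaved in pairs, and require the object size to be a whole multiple of tile_height * stride. A small standalone sketch of that check, assuming a power-of-two stride as the earlier checks in the function guarantee (the struct and its fields are illustrative, not the driver's real types):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's parameters. */
struct tiling_params {
        bool gen2;              /* i8xx family */
        bool y_tiled_128;       /* Y tiling with 128-byte rows */
        unsigned int stride;    /* bytes per scanline, power of two */
        unsigned int size;      /* object size in bytes */
};

/* Mirrors the size check added in i915_tiling_ok(): the object must end on
 * a full tile row, and i8xx needs an even number of tile rows because it
 * interleaves two rows of tiles. */
static bool size_fits_tile_rows(const struct tiling_params *p)
{
        unsigned int tile_height = (p->gen2 || p->y_tiled_128) ? 32 : 8;

        if (p->gen2)
                tile_height *= 2;

        /* tile_height * stride is a power of two, so a mask test suffices */
        return (p->size & (tile_height * p->stride - 1)) == 0;
}

int main(void)
{
        struct tiling_params ok  = { .gen2 = true, .stride = 2048, .size = 4 * 64 * 2048 };
        struct tiling_params bad = { .gen2 = true, .stride = 2048, .size = 4 * 64 * 2048 + 4096 };

        printf("aligned:    %d\n", size_fits_tile_rows(&ok));   /* 1: exactly 4 tile rows */
        printf("misaligned: %d\n", size_fits_tile_rows(&bad));  /* 0: ends mid tile row */
        return 0;
}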
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
         struct drm_mode_config *mode_config = &dev->mode_config;
         struct intel_encoder *encoder;
 
+        DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
         list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                 if (encoder->hot_plug)
                         encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
         } else {
                 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                                SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-                hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
-                I915_WRITE(FDI_RXA_IMR, 0);
-                I915_WRITE(FDI_RXB_IMR, 0);
+                hotplug_mask |= SDE_AUX_MASK;
         }
 
         dev_priv->pch_irq_mask = ~hotplug_mask;
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
                 wait_event(dev_priv->pending_flip_queue,
+                           atomic_read(&dev_priv->mm.wedged) ||
                            atomic_read(&obj->pending_flip) == 0);
 
                 /* Big Hammer, we also need to ensure that any pending
                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
                  * current scanout is retired before unpinning the old
                  * framebuffer.
+                 *
+                 * This should only fail upon a hung GPU, in which case we
+                 * can safely continue.
                  */
                 ret = i915_gem_object_flush_gpu(obj, false);
-                if (ret) {
-                        i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-                        mutex_unlock(&dev->struct_mutex);
-                        return ret;
-                }
+                (void) ret;
         }
 
         ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
                    atomic_read(&obj->pending_flip) == 0);
 }
 
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+        struct drm_device *dev = crtc->dev;
+        struct drm_mode_config *mode_config = &dev->mode_config;
+        struct intel_encoder *encoder;
+
+        /*
+         * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+         * must be driven by its own crtc; no sharing is possible.
+         */
+        list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+                if (encoder->base.crtc != crtc)
+                        continue;
+
+                switch (encoder->type) {
+                case INTEL_OUTPUT_EDP:
+                        if (!intel_encoder_is_pch_edp(&encoder->base))
+                                return false;
+                        continue;
+                }
+        }
+
+        return true;
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
         struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
         int pipe = intel_crtc->pipe;
         int plane = intel_crtc->plane;
         u32 reg, temp;
+        bool is_pch_port = false;
 
         if (intel_crtc->active)
                 return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                         I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
         }
 
-        ironlake_fdi_enable(crtc);
+        is_pch_port = intel_crtc_driving_pch(crtc);
+
+        if (is_pch_port)
+                ironlake_fdi_enable(crtc);
+        else {
+                /* disable CPU FDI tx and PCH FDI rx */
+                reg = FDI_TX_CTL(pipe);
+                temp = I915_READ(reg);
+                I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+                POSTING_READ(reg);
+
+                reg = FDI_RX_CTL(pipe);
+                temp = I915_READ(reg);
+                temp &= ~(0x7 << 16);
+                temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+                I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+                POSTING_READ(reg);
+                udelay(100);
+
+                /* Ironlake workaround, disable clock pointer after downing FDI */
+                if (HAS_PCH_IBX(dev))
+                        I915_WRITE(FDI_RX_CHICKEN(pipe),
+                                   I915_READ(FDI_RX_CHICKEN(pipe) &
+                                             ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+                /* still set train pattern 1 */
+                reg = FDI_TX_CTL(pipe);
+                temp = I915_READ(reg);
+                temp &= ~FDI_LINK_TRAIN_NONE;
+                temp |= FDI_LINK_TRAIN_PATTERN_1;
+                I915_WRITE(reg, temp);
+
+                reg = FDI_RX_CTL(pipe);
+                temp = I915_READ(reg);
+                if (HAS_PCH_CPT(dev)) {
+                        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+                        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+                } else {
+                        temp &= ~FDI_LINK_TRAIN_NONE;
+                        temp |= FDI_LINK_TRAIN_PATTERN_1;
+                }
+                /* BPC in FDI rx is consistent with that in PIPECONF */
+                temp &= ~(0x07 << 16);
+                temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+                I915_WRITE(reg, temp);
+                POSTING_READ(reg);
+
+                udelay(100);
+        }
 
         /* Enable panel fitting for LVDS */
         if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                 intel_flush_display_plane(dev, plane);
         }
 
+        /* Skip the PCH stuff if possible */
+        if (!is_pch_port)
+                goto done;
+
         /* For PCH output, training FDI link */
         if (IS_GEN6(dev))
                 gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
         I915_WRITE(reg, temp | TRANS_ENABLE);
         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
                 DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
         intel_crtc_load_lut(crtc);
         intel_update_fbc(dev);
         intel_crtc_update_cursor(crtc, true);
@@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev)
                 POSTING_READ(RSTDBYCTL);
         }
 
-        ironlake_disable_rc6(dev);
+        ironlake_teardown_rc6(dev);
 }
 
 static int ironlake_setup_rc6(struct drm_device *dev)
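The intel_pipe_set_base() hunk adds mm.wedged to the wait condition so a hung GPU wakes the waiter instead of leaving it blocked on pending_flip forever. A tiny sketch of that predicate using C11 atomics in place of the kernel's wait_event() machinery (only the pending_flip/wedged names are borrowed from the diff):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int  pending_flip;   /* page flips still outstanding on the object */
static atomic_bool wedged;         /* set once the GPU is declared hung */

/* The condition the fixed wait sleeps on: done when every flip has retired,
 * or when the GPU is wedged and they never will. */
static bool may_continue(void)
{
        return atomic_load(&wedged) || atomic_load(&pending_flip) == 0;
}

int main(void)
{
        atomic_store(&pending_flip, 2);
        atomic_store(&wedged, false);
        printf("flips pending, gpu alive: %d\n", may_continue());  /* 0: keep waiting */

        atomic_store(&wedged, true);
        printf("flips pending, gpu hung:  %d\n", may_continue());  /* 1: stop waiting */
        return 0;
}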