Commit eda1be63 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/i915: Move legacy breadcrumb out of the reserved status page area
  drm/i915: Filter pci devices based on PCI_CLASS_DISPLAY_VGA
  drm/radeon: map registers at load time
  drm: Remove infrastructure for supporting i915's vblank swapping.
  i915: Remove racy delayed vblank swap ioctl.
  i915: Don't whine when pci_enable_msi() fails.
  i915: Don't attempt to short-circuit object_wait_rendering by checking domains.
  i915: Clean up sarea pointers on leavevt
  i915: Save/restore MCHBAR_RENDER_STANDBY on GM965/GM45
parents 5da38d32 0baf823a
@@ -266,11 +266,19 @@ int drm_init(struct drm_driver *driver)
for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
/* Loop around setting up a DRM device for each PCI device
* matching our ID and device class. If we had the internal
* function that pci_get_subsys and pci_get_class used, we'd
* be able to just pass pid in instead of doing a two-stage
* thing.
*/
pdev = NULL;
/* pass back in pdev to account for multiple identical cards */
while ((pdev =
pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
pid->subdevice, pdev)) != NULL) {
if ((pdev->class & pid->class_mask) != pid->class)
continue;
/* stealth mode requires a manual probe */
pci_dev_get(pdev);
drm_get_dev(pdev, pid, driver);
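Note: as a hedged illustration of the class test added above, here is a minimal sketch using an i915-style ID entry (the values mirror the drm_pciids.h change at the end of this diff; the example_* names are hypothetical and not part of the patch):

#include <linux/pci.h>

/* An ID entry that only matches VGA-class display functions. */
static const struct pci_device_id example_i915_id = {
        .vendor     = 0x8086,
        .device     = 0x2a42,                     /* GM45, for example */
        .subvendor  = PCI_ANY_ID,
        .subdevice  = PCI_ANY_ID,
        .class      = PCI_CLASS_DISPLAY_VGA << 8, /* 0x030000 */
        .class_mask = 0xffff00,                   /* ignore the prog-if byte */
};

/* Same test as in the loop above: reject PCI functions whose class does
 * not match, e.g. a second, non-VGA display function on some chipsets. */
static bool example_class_matches(const struct pci_dev *pdev)
{
        return (pdev->class & example_i915_id.class_mask) == example_i915_id.class;
}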
@@ -280,8 +280,6 @@ int drm_irq_uninstall(struct drm_device * dev)
drm_vblank_cleanup(dev);
dev->locked_tasklet_func = NULL;
return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);
@@ -699,81 +697,3 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
drm_vbl_send_signals(dev, crtc);
}
EXPORT_SYMBOL(drm_handle_vblank);
/**
* Tasklet wrapper function.
*
* \param data DRM device in disguise.
*
* Attempts to grab the HW lock and calls the driver callback on success. On
* failure, leave the lock marked as contended so the callback can be called
* from drm_unlock().
*/
static void drm_locked_tasklet_func(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
unsigned long irqflags;
void (*tasklet_func)(struct drm_device *);
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
tasklet_func = dev->locked_tasklet_func;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
if (!tasklet_func ||
!drm_lock_take(&dev->lock,
DRM_KERNEL_CONTEXT)) {
return;
}
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
tasklet_func = dev->locked_tasklet_func;
dev->locked_tasklet_func = NULL;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
if (tasklet_func != NULL)
tasklet_func(dev);
drm_lock_free(&dev->lock,
DRM_KERNEL_CONTEXT);
}
/**
* Schedule a tasklet to call back a driver hook with the HW lock held.
*
* \param dev DRM device.
* \param func Driver callback.
*
* This is intended for triggering actions that require the HW lock from an
* interrupt handler. The lock will be grabbed ASAP after the interrupt handler
* completes. Note that the callback may be called from interrupt or process
* context, it must not make any assumptions about this. Also, the HW lock will
* be held with the kernel context or any client context.
*/
void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
{
unsigned long irqflags;
static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
return;
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
if (dev->locked_tasklet_func) {
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
return;
}
dev->locked_tasklet_func = func;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
drm_tasklet.data = (unsigned long)dev;
tasklet_hi_schedule(&drm_tasklet);
}
EXPORT_SYMBOL(drm_locked_tasklet);
@@ -154,8 +154,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_lock *lock = data;
unsigned long irqflags;
void (*tasklet_func)(struct drm_device *);
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@@ -163,13 +161,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
}
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
tasklet_func = dev->locked_tasklet_func;
dev->locked_tasklet_func = NULL;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
if (tasklet_func != NULL)
tasklet_func(dev);
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
/* kernel_context_switch isn't used by any of the x86 drm
@@ -92,7 +92,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->tasklet_lock);
spin_lock_init(&dev->lock.spinlock);
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
@@ -154,6 +154,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
if (I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
dev_priv->sarea = NULL;
dev_priv->sarea_priv = NULL;
return 0;
}
@@ -442,7 +445,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
BEGIN_LP_RING(4);
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
@@ -573,7 +576,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
BEGIN_LP_RING(4);
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
@@ -608,7 +611,6 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
dev_priv->sarea_priv;
drm_i915_batchbuffer_t *batch = data;
@@ -634,7 +636,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
if (sarea_priv)
sarea_priv->last_dispatch = (int)hw_status[5];
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return ret;
}
@@ -642,7 +644,6 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
dev_priv->sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
@@ -670,7 +671,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
}
if (sarea_priv)
sarea_priv->last_dispatch = (int)hw_status[5];
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return 0;
}
@@ -849,8 +850,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* be lost or delayed
*/
if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
if (pci_enable_msi(dev->pdev))
DRM_ERROR("failed to enable MSI\n");
pci_enable_msi(dev->pdev);
intel_opregion_init(dev);
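Note: the hunk above stops treating a failed pci_enable_msi() as an error. A minimal sketch of the resulting policy (illustrative only; example_setup_interrupts and its wants_msi parameter are hypothetical stand-ins for the IS_I945G/IS_I945GM/IS_I965GM checks):

#include <linux/pci.h>
#include <linux/types.h>

static void example_setup_interrupts(struct pci_dev *pdev, bool wants_msi)
{
        /* Skip MSI on chips where it is known to be lost or delayed; on
         * the others treat it as best effort, because a failure simply
         * leaves the device on its legacy line interrupt. */
        if (wants_msi)
                pci_enable_msi(pdev);   /* return value deliberately ignored */
}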
@@ -88,13 +88,6 @@ struct mem_block {
struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};
typedef struct _drm_i915_vbl_swap {
struct list_head head;
drm_drawable_t drw_id;
unsigned int pipe;
unsigned int sequence;
} drm_i915_vbl_swap_t;
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -146,10 +139,6 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
spinlock_t swaps_lock;
drm_i915_vbl_swap_t vbl_swaps;
unsigned int swaps_pending;
struct intel_opregion opregion;
/* Register state */
@@ -157,6 +146,7 @@ typedef struct drm_i915_private {
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
u32 saveRENDERSTANDBY;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -241,9 +231,6 @@ typedef struct drm_i915_private {
u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[37];
/** Work task for vblank-related ring access */
struct work_struct vblank_work;
struct {
struct drm_mm gtt_space;
@@ -444,7 +431,6 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
void i915_user_irq_get(struct drm_device *dev);
void i915_user_irq_put(struct drm_device *dev);
extern void i915_vblank_work_handler(struct work_struct *work);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
extern int i915_driver_irq_postinstall(struct drm_device *dev);
@@ -622,8 +608,9 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
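Note: a minimal sketch of the status-page layout these macros assume (illustrative; the example_* names are not driver code). The ring's MI_STORE_DWORD_INDEX command writes the breadcrumb sequence number into dword 0x21, which lies in the 0x20-0x3ff range available to the driver, next to the GEM slot at 0x20, and READ_BREADCRUMB() reads that same dword back:

#include <linux/types.h>

/* 4 KiB hardware status page viewed as 1024 dwords. */
struct example_hws_page {
        volatile u32 dw[1024];
};

#define EXAMPLE_GEM_HWS_INDEX    0x20 /* matches I915_GEM_HWS_INDEX */
#define EXAMPLE_BREADCRUMB_INDEX 0x21 /* matches I915_BREADCRUMB_INDEX */

/* Same access pattern as READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX): the
 * wait and interrupt paths poll this slot to learn which breadcrumb the
 * GPU has retired. */
static u32 example_read_breadcrumb(const struct example_hws_page *hws)
{
        return hws->dw[EXAMPLE_BREADCRUMB_INDEX];
}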
@@ -1455,11 +1455,9 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
read_domains, write_domain);
/* Wait on any GPU rendering to the object to be flushed. */
if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
if (obj_priv->page_cpu_valid == NULL) {
obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
@@ -527,6 +527,9 @@
#define C0DRB3 0x10206
#define C1DRB3 0x10606
/** GM965 GM45 render standby register */
#define MCHBAR_RENDER_STANDBY 0x111B8
/*
* Overlay regs
*/
@@ -240,6 +240,10 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
/* Render Standby */
if (IS_I965G(dev) && IS_MOBILE(dev))
dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
/* Display arbitration control */
dev_priv->saveDSPARB = I915_READ(DSPARB);
@@ -365,6 +369,11 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
/* Render Standby */
if (IS_I965G(dev) && IS_MOBILE(dev))
I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
/* Display arbitration */
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
/* Pipe & plane A info */
@@ -1751,6 +1751,12 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
else
dev_priv->flags |= RADEON_IS_PCI;
ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
drm_get_resource_len(dev, 2), _DRM_REGISTERS,
_DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
if (ret != 0)
return ret;
DRM_DEBUG("%s card detected\n",
((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
return ret;
@@ -1767,12 +1773,6 @@ int radeon_driver_firstopen(struct drm_device *dev)
dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
drm_get_resource_len(dev, 2), _DRM_REGISTERS,
_DRM_READ_ONLY, &dev_priv->mmio);
if (ret != 0)
return ret;
dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
ret = drm_addmap(dev, dev_priv->fb_aper_offset,
drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
@@ -1788,6 +1788,9 @@ int radeon_driver_unload(struct drm_device *dev)
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
drm_rmmap(dev, dev_priv->mmio);
drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
dev->dev_private = NULL;
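Note: taken together, the radeon hunks above move the register mapping from firstopen to load time and release it at unload; the added _DRM_DRIVER flag marks the map as driver-managed so the DRM core does not tear it down when the last client closes. A condensed sketch of that pairing, assuming the usual radeon_drv.h/drmP.h context (illustrative; it restates the calls from the hunks):

static int example_radeon_map_regs(struct drm_device *dev,
                                   drm_radeon_private_t *dev_priv)
{
        /* Map the register BAR (PCI resource 2) once, at load time. */
        return drm_addmap(dev, drm_get_resource_start(dev, 2),
                          drm_get_resource_len(dev, 2), _DRM_REGISTERS,
                          _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
}

static void example_radeon_unmap_regs(struct drm_device *dev,
                                      drm_radeon_private_t *dev_priv)
{
        /* Undo the load-time mapping in the unload hook. */
        drm_rmmap(dev, dev_priv->mmio);
}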
@@ -287,7 +287,6 @@ typedef struct drm_radeon_private {
unsigned long gart_textures_offset;
drm_local_map_t *sarea;
drm_local_map_t *mmio;
drm_local_map_t *cp_ring;
drm_local_map_t *ring_rptr;
drm_local_map_t *gart_textures;
@@ -318,6 +317,7 @@ typedef struct drm_radeon_private {
int num_gb_pipes;
int track_flush;
drm_local_map_t *mmio;
} drm_radeon_private_t;
typedef struct drm_radeon_buf_priv {
@@ -861,8 +861,6 @@ struct drm_device {
struct timer_list vblank_disable_timer;
u32 max_vblank_count; /**< size of vblank counter register */
spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
void (*locked_tasklet_func)(struct drm_device *dev);
/*@} */
cycles_t ctx_start;
@@ -1149,8 +1147,6 @@ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
extern void drm_locked_tasklet(struct drm_device *dev,
void(*func)(struct drm_device *));
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern void drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
@@ -1158,7 +1154,6 @@ extern void drm_vblank_put(struct drm_device *dev, int crtc);
/* Modesetting support */
extern int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
/* AGP/GART support (drm_agpsupport.h) */
extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
@@ -395,27 +395,27 @@
{0, 0, 0}
#define i915_PCI_IDS \
{0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0, 0, 0}