Commit 9c152646 authored by Linus Torvalds

Merge bk://drm.bkbits.net/drm-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 49e9c1cc 4c6b5945
...@@ -46,8 +46,8 @@ ...@@ -46,8 +46,8 @@
#define DRM_IOC_WRITE _IOC_WRITE #define DRM_IOC_WRITE _IOC_WRITE
#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE #define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#elif defined(__FreeBSD__) || defined(__NetBSD__)
#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
#if defined(__FreeBSD__) && defined(XFree86Server)
#if defined(__FreeBSD__) && defined(IN_MODULE)
/* Prevent name collision when including sys/ioccom.h */ /* Prevent name collision when including sys/ioccom.h */
#undef ioctl #undef ioctl
#include <sys/ioccom.h> #include <sys/ioccom.h>
...@@ -130,6 +130,18 @@ typedef struct drm_tex_region { ...@@ -130,6 +130,18 @@ typedef struct drm_tex_region {
unsigned int age; unsigned int age;
} drm_tex_region_t; } drm_tex_region_t;
/**
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
* processor bus contention on a multiprocessor system, there should not be any
* other data stored in the same cache line.
*/
typedef struct drm_hw_lock {
__volatile__ unsigned int lock; /**< lock variable */
char padding[60]; /**< Pad to cache line */
} drm_hw_lock_t;
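For illustration only: a minimal sketch of why the lock word gets a whole cache line to itself. The helper name and the cmpxchg-based claim below are assumptions for the sketch, not the DRM locking protocol; the point is that contended atomic updates touch only this one cache line.
/* Hypothetical helper, illustration only -- not the DRM lock algorithm. */
static int try_take_hw_lock(drm_hw_lock_t *hw, unsigned int context)
{
	/* Atomically claim the lock word; contention bounces only this cache line. */
	return cmpxchg(&hw->lock, 0, context) == 0;
}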
/** /**
* DRM_IOCTL_VERSION ioctl argument type. * DRM_IOCTL_VERSION ioctl argument type.
...@@ -580,6 +592,16 @@ typedef struct drm_scatter_gather { ...@@ -580,6 +592,16 @@ typedef struct drm_scatter_gather {
unsigned long handle; /**< Used for mapping / unmapping */ unsigned long handle; /**< Used for mapping / unmapping */
} drm_scatter_gather_t; } drm_scatter_gather_t;
/**
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
typedef struct drm_set_version {
int drm_di_major;
int drm_di_minor;
int drm_dd_major;
int drm_dd_minor;
} drm_set_version_t;
#define DRM_IOCTL_BASE 'd' #define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
...@@ -594,6 +616,7 @@ typedef struct drm_scatter_gather { ...@@ -594,6 +616,7 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, drm_map_t) #define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, drm_map_t)
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, drm_client_t) #define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, drm_client_t)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, drm_stats_t) #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, drm_stats_t)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, drm_set_version_t)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t) #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t) #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
......
...@@ -92,8 +92,8 @@ ...@@ -92,8 +92,8 @@
#ifndef __HAVE_DMA #ifndef __HAVE_DMA
#define __HAVE_DMA 0 #define __HAVE_DMA 0
#endif #endif
#ifndef __HAVE_DMA_IRQ
#define __HAVE_DMA_IRQ 0
#ifndef __HAVE_IRQ
#define __HAVE_IRQ 0
#endif #endif
#ifndef __HAVE_DMA_WAITLIST #ifndef __HAVE_DMA_WAITLIST
#define __HAVE_DMA_WAITLIST 0 #define __HAVE_DMA_WAITLIST 0
...@@ -148,6 +148,7 @@ ...@@ -148,6 +148,7 @@
#define DRM_MEM_CTXBITMAP 18 #define DRM_MEM_CTXBITMAP 18
#define DRM_MEM_STUB 19 #define DRM_MEM_STUB 19
#define DRM_MEM_SGLISTS 20 #define DRM_MEM_SGLISTS 20
#define DRM_MEM_CTXLIST 21
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
...@@ -324,6 +325,7 @@ do { \ ...@@ -324,6 +325,7 @@ do { \
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) #define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist) #define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
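A quick worked example of the packing this macro performs (added for clarity, not part of the patch):
/* DRM_IF_VERSION(1, 2) == (1 << 16) | 2 == 0x10002, so plain integer
 * comparisons such as dev->if_version < DRM_IF_VERSION(1, 2) order
 * interface versions by major number first, then minor. */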
/** /**
* Get the private SAREA mapping. * Get the private SAREA mapping.
* *
...@@ -362,11 +364,6 @@ do { \ ...@@ -362,11 +364,6 @@ do { \
typedef int drm_ioctl_t( struct inode *inode, struct file *filp, typedef int drm_ioctl_t( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg ); unsigned int cmd, unsigned long arg );
typedef struct drm_pci_list {
u16 vendor;
u16 device;
} drm_pci_list_t;
typedef struct drm_ioctl_desc { typedef struct drm_ioctl_desc {
drm_ioctl_t *func; drm_ioctl_t *func;
int auth_needed; int auth_needed;
...@@ -463,18 +460,6 @@ typedef struct drm_buf_entry { ...@@ -463,18 +460,6 @@ typedef struct drm_buf_entry {
drm_freelist_t freelist; drm_freelist_t freelist;
} drm_buf_entry_t; } drm_buf_entry_t;
/**
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
* processor bus contention on a multiprocessor system, there should not be any
* other data stored in the same cache line.
*/
typedef struct drm_hw_lock {
__volatile__ unsigned int lock; /**< lock variable */
char padding[60]; /**< Pad to cache line */
} drm_hw_lock_t;
/** File private data */ /** File private data */
typedef struct drm_file { typedef struct drm_file {
int authenticated; int authenticated;
...@@ -488,6 +473,9 @@ typedef struct drm_file { ...@@ -488,6 +473,9 @@ typedef struct drm_file {
struct drm_device *dev; struct drm_device *dev;
int remove_auth_on_close; int remove_auth_on_close;
unsigned long lock_count; unsigned long lock_count;
#ifdef DRIVER_FILE_FIELDS
DRIVER_FILE_FIELDS;
#endif
} drm_file_t; } drm_file_t;
/** Wait queue */ /** Wait queue */
...@@ -602,6 +590,15 @@ typedef struct drm_map_list { ...@@ -602,6 +590,15 @@ typedef struct drm_map_list {
typedef drm_map_t drm_local_map_t; typedef drm_map_t drm_local_map_t;
/**
* Context handle list
*/
typedef struct drm_ctx_list {
struct list_head head; /**< list head */
drm_context_t handle; /**< context handle */
drm_file_t *tag; /**< associated fd private data */
} drm_ctx_list_t;
#if __HAVE_VBL_IRQ #if __HAVE_VBL_IRQ
typedef struct drm_vbl_sig { typedef struct drm_vbl_sig {
...@@ -622,6 +619,8 @@ typedef struct drm_device { ...@@ -622,6 +619,8 @@ typedef struct drm_device {
int unique_len; /**< Length of unique field */ int unique_len; /**< Length of unique field */
dev_t device; /**< Device number for mknod */ dev_t device; /**< Device number for mknod */
char *devname; /**< For /proc/interrupts */ char *devname; /**< For /proc/interrupts */
int minor; /**< Minor device number */
int if_version; /**< Highest interface version set */
int blocked; /**< Blocked due to VC switch? */ int blocked; /**< Blocked due to VC switch? */
struct proc_dir_entry *root; /**< Root for this device's entries */ struct proc_dir_entry *root; /**< Root for this device's entries */
...@@ -660,6 +659,12 @@ typedef struct drm_device { ...@@ -660,6 +659,12 @@ typedef struct drm_device {
drm_map_list_t *maplist; /**< Linked list of regions */ drm_map_list_t *maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */ int map_count; /**< Number of mappable regions */
/** \name Context handle management */
/*@{*/
drm_ctx_list_t *ctxlist; /**< Linked list of context handles */
int ctx_count; /**< Number of context handles */
struct semaphore ctxlist_sem; /**< For ctxlist */
drm_map_t **context_sareas; /**< per-context SAREA's */ drm_map_t **context_sareas; /**< per-context SAREA's */
int max_context; int max_context;
...@@ -679,6 +684,7 @@ typedef struct drm_device { ...@@ -679,6 +684,7 @@ typedef struct drm_device {
/** \name Context support */ /** \name Context support */
/*@{*/ /*@{*/
int irq; /**< Interrupt used by board */ int irq; /**< Interrupt used by board */
int irq_enabled; /**< True if irq handler is enabled */
__volatile__ long context_flag; /**< Context swapping flag */ __volatile__ long context_flag; /**< Context swapping flag */
__volatile__ long interrupt_flag; /**< Interruption handler flag */ __volatile__ long interrupt_flag; /**< Interruption handler flag */
__volatile__ long dma_flag; /**< DMA dispatch flag */ __volatile__ long dma_flag; /**< DMA dispatch flag */
...@@ -714,7 +720,12 @@ typedef struct drm_device { ...@@ -714,7 +720,12 @@ typedef struct drm_device {
#if __REALLY_HAVE_AGP #if __REALLY_HAVE_AGP
drm_agp_head_t *agp; /**< AGP data */ drm_agp_head_t *agp; /**< AGP data */
#endif #endif
struct pci_dev *pdev; /**< PCI device structure */
struct pci_dev *pdev; /**< PCI device structure */
int pci_domain; /**< PCI bus domain number */
int pci_bus; /**< PCI bus number */
int pci_slot; /**< PCI slot number */
int pci_func; /**< PCI function number */
#ifdef __alpha__ #ifdef __alpha__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
struct pci_controler *hose; struct pci_controler *hose;
...@@ -758,18 +769,6 @@ extern int DRM(flush)(struct file *filp); ...@@ -758,18 +769,6 @@ extern int DRM(flush)(struct file *filp);
extern int DRM(fasync)(int fd, struct file *filp, int on); extern int DRM(fasync)(int fd, struct file *filp, int on);
/* Mapping support (drm_vm.h) */ /* Mapping support (drm_vm.h) */
extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int *type);
extern struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int *type);
extern struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int *type);
extern struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int *type);
extern void DRM(vm_open)(struct vm_area_struct *vma); extern void DRM(vm_open)(struct vm_area_struct *vma);
extern void DRM(vm_close)(struct vm_area_struct *vma); extern void DRM(vm_close)(struct vm_area_struct *vma);
extern void DRM(vm_shm_close)(struct vm_area_struct *vma); extern void DRM(vm_shm_close)(struct vm_area_struct *vma);
...@@ -804,8 +803,8 @@ extern int DRM(unbind_agp)(DRM_AGP_MEM *handle); ...@@ -804,8 +803,8 @@ extern int DRM(unbind_agp)(DRM_AGP_MEM *handle);
#endif #endif
/* Misc. IOCTL support (drm_ioctl.h) */ /* Misc. IOCTL support (drm_ioctl.h) */
extern int DRM(irq_busid)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(irq_by_busid)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(getunique)(struct inode *inode, struct file *filp, extern int DRM(getunique)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg); unsigned int cmd, unsigned long arg);
extern int DRM(setunique)(struct inode *inode, struct file *filp, extern int DRM(setunique)(struct inode *inode, struct file *filp,
...@@ -816,6 +815,8 @@ extern int DRM(getclient)(struct inode *inode, struct file *filp, ...@@ -816,6 +815,8 @@ extern int DRM(getclient)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg); unsigned int cmd, unsigned long arg);
extern int DRM(getstats)(struct inode *inode, struct file *filp, extern int DRM(getstats)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg); unsigned int cmd, unsigned long arg);
extern int DRM(setversion)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Context IOCTL support (drm_context.h) */ /* Context IOCTL support (drm_context.h) */
extern int DRM(resctx)( struct inode *inode, struct file *filp, extern int DRM(resctx)( struct inode *inode, struct file *filp,
...@@ -900,12 +901,17 @@ extern int DRM(dma_setup)(drm_device_t *dev); ...@@ -900,12 +901,17 @@ extern int DRM(dma_setup)(drm_device_t *dev);
extern void DRM(dma_takedown)(drm_device_t *dev); extern void DRM(dma_takedown)(drm_device_t *dev);
extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf); extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf);
extern void DRM(reclaim_buffers)( struct file *filp ); extern void DRM(reclaim_buffers)( struct file *filp );
#if __HAVE_DMA_IRQ
#endif /* __HAVE_DMA */
/* IRQ support (drm_irq.h) */
#if __HAVE_IRQ || __HAVE_DMA
extern int DRM(control)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int DRM(irq_install)( drm_device_t *dev, int irq );
#endif
#if __HAVE_IRQ
extern int DRM(irq_install)( drm_device_t *dev );
extern int DRM(irq_uninstall)( drm_device_t *dev );
extern irqreturn_t DRM(dma_service)( DRM_IRQ_ARGS );
extern irqreturn_t DRM(irq_handler)( DRM_IRQ_ARGS );
extern void DRM(driver_irq_preinstall)( drm_device_t *dev ); extern void DRM(driver_irq_preinstall)( drm_device_t *dev );
extern void DRM(driver_irq_postinstall)( drm_device_t *dev ); extern void DRM(driver_irq_postinstall)( drm_device_t *dev );
extern void DRM(driver_irq_uninstall)( drm_device_t *dev ); extern void DRM(driver_irq_uninstall)( drm_device_t *dev );
...@@ -915,12 +921,11 @@ extern int DRM(wait_vblank)(struct inode *inode, struct file *filp, ...@@ -915,12 +921,11 @@ extern int DRM(wait_vblank)(struct inode *inode, struct file *filp,
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq); extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
extern void DRM(vbl_send_signals)( drm_device_t *dev ); extern void DRM(vbl_send_signals)( drm_device_t *dev );
#endif #endif
#if __HAVE_DMA_IRQ_BH
extern void DRM(dma_immediate_bh)( void *dev );
#if __HAVE_IRQ_BH
extern void DRM(irq_immediate_bh)( void *dev );
#endif #endif
#endif #endif
#endif /* __HAVE_DMA */
#if __REALLY_HAVE_AGP #if __REALLY_HAVE_AGP
/* AGP/GART support (drm_agpsupport.h) */ /* AGP/GART support (drm_agpsupport.h) */
......
...@@ -103,7 +103,13 @@ int DRM(agp_acquire)(struct inode *inode, struct file *filp, ...@@ -103,7 +103,13 @@ int DRM(agp_acquire)(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev; drm_device_t *dev = priv->dev;
int retcode; int retcode;
if (!dev->agp || dev->agp->acquired || !drm_agp->acquire)
return -EINVAL;
if (!dev->agp)
return -ENODEV;
if (dev->agp->acquired)
return -EBUSY;
if (!drm_agp->acquire)
return -EINVAL;
if ( dev->agp->cant_use_aperture )
return -EINVAL;
if ((retcode = drm_agp->acquire())) if ((retcode = drm_agp->acquire()))
return retcode; return retcode;
......
...@@ -147,7 +147,9 @@ int DRM(addmap)( struct inode *inode, struct file *filp, ...@@ -147,7 +147,9 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
MTRR_TYPE_WRCOMB, 1 ); MTRR_TYPE_WRCOMB, 1 );
} }
#endif #endif
map->handle = DRM(ioremap)( map->offset, map->size, dev );
if (map->type == _DRM_REGISTERS)
map->handle = DRM(ioremap)( map->offset, map->size,
dev );
break; break;
case _DRM_SHM: case _DRM_SHM:
...@@ -160,6 +162,12 @@ int DRM(addmap)( struct inode *inode, struct file *filp, ...@@ -160,6 +162,12 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
} }
map->offset = (unsigned long)map->handle; map->offset = (unsigned long)map->handle;
if ( map->flags & _DRM_CONTAINS_LOCK ) { if ( map->flags & _DRM_CONTAINS_LOCK ) {
/* Prevent a 2nd X Server from creating a 2nd lock */
if (dev->lock.hw_lock != NULL) {
vfree( map->handle );
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
return -EBUSY;
}
dev->sigdata.lock = dev->sigdata.lock =
dev->lock.hw_lock = map->handle; /* Pointer to lock */ dev->lock.hw_lock = map->handle; /* Pointer to lock */
} }
...@@ -767,7 +775,7 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp, ...@@ -767,7 +775,7 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
} }
#endif /* __HAVE_PCI_DMA */ #endif /* __HAVE_PCI_DMA */
#ifdef __HAVE_SG
#if __HAVE_SG
int DRM(addbufs_sg)( struct inode *inode, struct file *filp, int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg ) unsigned int cmd, unsigned long arg )
{ {
......
...@@ -401,6 +401,7 @@ int DRM(addctx)( struct inode *inode, struct file *filp, ...@@ -401,6 +401,7 @@ int DRM(addctx)( struct inode *inode, struct file *filp,
{ {
drm_file_t *priv = filp->private_data; drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev; drm_device_t *dev = priv->dev;
drm_ctx_list_t * ctx_entry;
drm_ctx_t ctx; drm_ctx_t ctx;
if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) ) if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
...@@ -421,6 +422,20 @@ int DRM(addctx)( struct inode *inode, struct file *filp, ...@@ -421,6 +422,20 @@ int DRM(addctx)( struct inode *inode, struct file *filp,
if ( ctx.handle != DRM_KERNEL_CONTEXT ) if ( ctx.handle != DRM_KERNEL_CONTEXT )
DRIVER_CTX_CTOR(ctx.handle); /* XXX: also pass dev ? */ DRIVER_CTX_CTOR(ctx.handle); /* XXX: also pass dev ? */
#endif #endif
ctx_entry = DRM(alloc)( sizeof(*ctx_entry), DRM_MEM_CTXLIST );
if ( !ctx_entry ) {
DRM_DEBUG("out of memory\n");
return -ENOMEM;
}
INIT_LIST_HEAD( &ctx_entry->head );
ctx_entry->handle = ctx.handle;
ctx_entry->tag = priv;
down( &dev->ctxlist_sem );
list_add( &ctx_entry->head, &dev->ctxlist->head );
++dev->ctx_count;
up( &dev->ctxlist_sem );
if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) ) if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
return -EFAULT; return -EFAULT;
...@@ -543,6 +558,20 @@ int DRM(rmctx)( struct inode *inode, struct file *filp, ...@@ -543,6 +558,20 @@ int DRM(rmctx)( struct inode *inode, struct file *filp,
DRM(ctxbitmap_free)( dev, ctx.handle ); DRM(ctxbitmap_free)( dev, ctx.handle );
} }
down( &dev->ctxlist_sem );
if ( !list_empty( &dev->ctxlist->head ) ) {
drm_ctx_list_t *pos, *n;
list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
if ( pos->handle == ctx.handle ) {
list_del( &pos->head );
DRM(free)( pos, sizeof(*pos), DRM_MEM_CTXLIST );
--dev->ctx_count;
}
}
}
up( &dev->ctxlist_sem );
return 0; return 0;
} }
......
...@@ -35,7 +35,6 @@ ...@@ -35,7 +35,6 @@
#include "drmP.h" #include "drmP.h"
#include <linux/interrupt.h> /* For task queue support */
#ifndef __HAVE_DMA_WAITQUEUE #ifndef __HAVE_DMA_WAITQUEUE
#define __HAVE_DMA_WAITQUEUE 0 #define __HAVE_DMA_WAITQUEUE 0
...@@ -43,15 +42,6 @@ ...@@ -43,15 +42,6 @@
#ifndef __HAVE_DMA_RECLAIM #ifndef __HAVE_DMA_RECLAIM
#define __HAVE_DMA_RECLAIM 0 #define __HAVE_DMA_RECLAIM 0
#endif #endif
#ifndef __HAVE_SHARED_IRQ
#define __HAVE_SHARED_IRQ 0
#endif
#if __HAVE_SHARED_IRQ
#define DRM_IRQ_TYPE SA_SHIRQ
#else
#define DRM_IRQ_TYPE 0
#endif
#if __HAVE_DMA #if __HAVE_DMA
...@@ -214,293 +204,11 @@ void DRM(reclaim_buffers)( struct file *filp ) ...@@ -214,293 +204,11 @@ void DRM(reclaim_buffers)( struct file *filp )
} }
#endif #endif
#if !__HAVE_IRQ
/* This stub DRM_IOCTL_CONTROL handler is for the drivers that used to require
* IRQs for DMA but no longer do. It maintains compatibility with the X Servers
* that try to use the control ioctl by simply returning success.
*/
#if __HAVE_DMA_IRQ
/**
* Install IRQ handler.
*
* \param dev DRM device.
* \param irq IRQ number.
*
* Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
* \c DRM(driver_irq_preinstall)() and \c DRM(driver_irq_postinstall)() functions
* before and after the installation.
*/
int DRM(irq_install)( drm_device_t *dev, int irq )
{
int ret;
if ( !irq )
return -EINVAL;
down( &dev->struct_sem );
/* Driver must have been initialized */
if ( !dev->dev_private ) {
up( &dev->struct_sem );
return -EINVAL;
}
if ( dev->irq ) {
up( &dev->struct_sem );
return -EBUSY;
}
dev->irq = irq;
up( &dev->struct_sem );
DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
dev->dma->next_buffer = NULL;
dev->dma->next_queue = NULL;
dev->dma->this_buffer = NULL;
#if __HAVE_DMA_IRQ_BH
INIT_WORK(&dev->work, DRM(dma_immediate_bh), dev);
#endif
#if __HAVE_VBL_IRQ
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init( &dev->vbl_lock );
INIT_LIST_HEAD( &dev->vbl_sigs.head );
dev->vbl_pending = 0;
#endif
/* Before installing handler */
DRM(driver_irq_preinstall)(dev);
/* Install handler */
ret = request_irq( dev->irq, DRM(dma_service),
DRM_IRQ_TYPE, dev->devname, dev );
if ( ret < 0 ) {
down( &dev->struct_sem );
dev->irq = 0;
up( &dev->struct_sem );
return ret;
}
/* After installing handler */
DRM(driver_irq_postinstall)(dev);
return 0;
}
/**
* Uninstall the IRQ handler.
*
* \param dev DRM device.
*
* Calls the driver's \c DRM(driver_irq_uninstall)() function, and stops the irq.
*/
int DRM(irq_uninstall)( drm_device_t *dev )
{
int irq;
down( &dev->struct_sem );
irq = dev->irq;
dev->irq = 0;
up( &dev->struct_sem );
if ( !irq )
return -EINVAL;
DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );
DRM(driver_irq_uninstall)( dev );
free_irq( irq, dev );
return 0;
}
/**
* IRQ control ioctl.
*
* \param inode device inode.
* \param filp file pointer.
* \param cmd command.
* \param arg user argument, pointing to a drm_control structure.
* \return zero on success or a negative number on failure.
*
* Calls irq_install() or irq_uninstall() according to \p arg.
*/
int DRM(control)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_control_t ctl;
if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
return -EFAULT;
switch ( ctl.func ) {
case DRM_INST_HANDLER:
return DRM(irq_install)( dev, ctl.irq );
case DRM_UNINST_HANDLER:
return DRM(irq_uninstall)( dev );
default:
return -EINVAL;
}
}
#if __HAVE_VBL_IRQ
/**
* Wait for VBLANK.
*
* \param inode device inode.
* \param filp file pointer.
* \param cmd command.
* \param data user argument, pointing to a drm_wait_vblank structure.
* \return zero on success or a negative number on failure.
*
* Verifies the IRQ is installed.
*
* If a signal is requested checks if this task has already scheduled the same signal
* for the same vblank sequence number - nothing to be done in
* that case. If the number of tasks waiting for the interrupt exceeds 100 the
* function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
* task.
*
* If a signal is not requested, then calls vblank_wait().
*/
int DRM(wait_vblank)( DRM_IOCTL_ARGS )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_wait_vblank_t vblwait;
struct timeval now;
int ret = 0;
unsigned int flags;
if (!dev->irq)
return -EINVAL;
DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
sizeof(vblwait) );
switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
case _DRM_VBLANK_RELATIVE:
vblwait.request.sequence += atomic_read( &dev->vbl_received );
vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
case _DRM_VBLANK_ABSOLUTE:
break;
default:
return -EINVAL;
}
flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
if ( flags & _DRM_VBLANK_SIGNAL ) {
unsigned long irqflags;
drm_vbl_sig_t *vbl_sig;
vblwait.reply.sequence = atomic_read( &dev->vbl_received );
spin_lock_irqsave( &dev->vbl_lock, irqflags );
/* Check if this task has already scheduled the same signal
* for the same vblank sequence number; nothing to be done in
* that case
*/
list_for_each_entry( vbl_sig, &dev->vbl_sigs.head, head ) {
if (vbl_sig->sequence == vblwait.request.sequence
&& vbl_sig->info.si_signo == vblwait.request.signal
&& vbl_sig->task == current)
{
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
goto done;
}
}
if ( dev->vbl_pending >= 100 ) {
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
return -EBUSY;
}
dev->vbl_pending++;
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
if ( !( vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) ) ) ) {
return -ENOMEM;
}
memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );
vbl_sig->sequence = vblwait.request.sequence;
vbl_sig->info.si_signo = vblwait.request.signal;
vbl_sig->task = current;
spin_lock_irqsave( &dev->vbl_lock, irqflags );
list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
} else {
ret = DRM(vblank_wait)( dev, &vblwait.request.sequence );
do_gettimeofday( &now );
vblwait.reply.tval_sec = now.tv_sec;
vblwait.reply.tval_usec = now.tv_usec;
}
done:
DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
sizeof(vblwait) );
return ret;
}
/**
* Send the VBLANK signals.
*
* \param dev DRM device.
*
* Sends a signal for each task in drm_device::vbl_sigs and empties the list.
*
* If a signal is not requested, then calls vblank_wait().
*/
void DRM(vbl_send_signals)( drm_device_t *dev )
{
struct list_head *list, *tmp;
drm_vbl_sig_t *vbl_sig;
unsigned int vbl_seq = atomic_read( &dev->vbl_received );
unsigned long flags;
spin_lock_irqsave( &dev->vbl_lock, flags );
list_for_each_safe( list, tmp, &dev->vbl_sigs.head ) {
vbl_sig = list_entry( list, drm_vbl_sig_t, head );
if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
list_del( list );
DRM_FREE( vbl_sig, sizeof(*vbl_sig) );
dev->vbl_pending--;
}
}
spin_unlock_irqrestore( &dev->vbl_lock, flags );
}
#endif /* __HAVE_VBL_IRQ */
#else
int DRM(control)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
...@@ -517,7 +225,6 @@ int DRM(control)( struct inode *inode, struct file *filp, ...@@ -517,7 +225,6 @@ int DRM(control)( struct inode *inode, struct file *filp,
return -EINVAL; return -EINVAL;
} }
} }
#endif
#endif /* __HAVE_DMA_IRQ */
#endif /* __HAVE_DMA */ #endif /* __HAVE_DMA */
...@@ -72,6 +72,8 @@ int DRM(open_helper)(struct inode *inode, struct file *filp, drm_device_t *dev) ...@@ -72,6 +72,8 @@ int DRM(open_helper)(struct inode *inode, struct file *filp, drm_device_t *dev)
priv->authenticated = capable(CAP_SYS_ADMIN); priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0; priv->lock_count = 0;
DRIVER_OPEN_HELPER( priv, dev );
down(&dev->struct_sem); down(&dev->struct_sem);
if (!dev->file_last) { if (!dev->file_last) {
priv->next = NULL; priv->next = NULL;
......
...@@ -35,69 +35,7 @@ ...@@ -35,69 +35,7 @@
#include "drmP.h" #include "drmP.h"
#include "linux/pci.h"
/**
* Get interrupt from bus id.
*
* \param inode device inode.
* \param filp file pointer.
* \param cmd command.
* \param arg user argument, pointing to a drm_irq_busid structure.
* \return zero on success or a negative number on failure.
*
* Finds the PCI device with the specified bus id and gets its IRQ number.
*/
int DRM(irq_busid)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_irq_busid_t p;
struct pci_dev *dev;
if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p)))
return -EFAULT;
#ifdef __alpha__
{
int domain = p.busnum >> 8;
p.busnum &= 0xff;
/*
* Find the hose the device is on (the domain number is the
* hose index) and offset the bus by the root bus of that
* hose.
*/
for(dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,NULL);
dev;
dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,dev)) {
struct pci_controller *hose = dev->sysdata;
if (hose->index == domain) {
p.busnum += hose->bus->number;
break;
}
}
}
#endif
dev = pci_find_slot(p.busnum, PCI_DEVFN(p.devnum, p.funcnum));
if (!dev) {
DRM_ERROR("pci_find_slot failed for %d:%d:%d\n",
p.busnum, p.devnum, p.funcnum);
p.irq = 0;
goto out;
}
if (pci_enable_device(dev) != 0) {
DRM_ERROR("pci_enable_device failed for %d:%d:%d\n",
p.busnum, p.devnum, p.funcnum);
p.irq = 0;
goto out;
}
p.irq = dev->irq;
out:
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
p.busnum, p.devnum, p.funcnum, p.irq);
if (copy_to_user((drm_irq_busid_t *)arg, &p, sizeof(p)))
return -EFAULT;
return 0;
}
/** /**
* Get the bus id. * Get the bus id.
...@@ -138,8 +76,10 @@ int DRM(getunique)(struct inode *inode, struct file *filp, ...@@ -138,8 +76,10 @@ int DRM(getunique)(struct inode *inode, struct file *filp,
* \param arg user argument, pointing to a drm_unique structure. * \param arg user argument, pointing to a drm_unique structure.
* \return zero on success or a negative number on failure. * \return zero on success or a negative number on failure.
* *
* Copies the bus id from userspace into drm_device::unique, and searches for
* the respective PCI device, updating drm_device::pdev.
* Copies the bus id from userspace into drm_device::unique, and verifies that
* it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
* in interface version 1.1 and will return EBUSY when setversion has requested
* version 1.1 or greater.
*/ */
int DRM(setunique)(struct inode *inode, struct file *filp, int DRM(setunique)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg) unsigned int cmd, unsigned long arg)
...@@ -147,6 +87,7 @@ int DRM(setunique)(struct inode *inode, struct file *filp, ...@@ -147,6 +87,7 @@ int DRM(setunique)(struct inode *inode, struct file *filp,
drm_file_t *priv = filp->private_data; drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev; drm_device_t *dev = priv->dev;
drm_unique_t u; drm_unique_t u;
int domain, bus, slot, func, ret;
if (dev->unique_len || dev->unique) return -EBUSY; if (dev->unique_len || dev->unique) return -EBUSY;
...@@ -164,55 +105,42 @@ int DRM(setunique)(struct inode *inode, struct file *filp, ...@@ -164,55 +105,42 @@ int DRM(setunique)(struct inode *inode, struct file *filp,
dev->devname = DRM(alloc)(strlen(dev->name) + strlen(dev->unique) + 2, dev->devname = DRM(alloc)(strlen(dev->name) + strlen(dev->unique) + 2,
DRM_MEM_DRIVER); DRM_MEM_DRIVER);
if(!dev->devname) {
DRM(free)(dev->devname, sizeof(*dev->devname), DRM_MEM_DRIVER);
return -ENOMEM;
}
if (!dev->devname)
return -ENOMEM;
sprintf(dev->devname, "%s@%s", dev->name, dev->unique); sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
do {
struct pci_dev *pci_dev;
int domain, b, d, f;
char *p;
for(p = dev->unique; p && *p && *p != ':'; p++);
if (!p || !*p) break;
b = (int)simple_strtoul(p+1, &p, 10);
if (*p != ':') break;
d = (int)simple_strtoul(p+1, &p, 10);
if (*p != ':') break;
f = (int)simple_strtoul(p+1, &p, 10);
if (*p) break;
domain = b >> 8;
b &= 0xff;
#ifdef __alpha__
/*
* Find the hose the device is on (the domain number is the
* hose index) and offset the bus by the root bus of that
* hose.
*/
for(pci_dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,NULL);
pci_dev;
pci_dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,pci_dev)) {
struct pci_controller *hose = pci_dev->sysdata;
if (hose->index == domain) {
b += hose->bus->number;
break;
}
}
#endif
pci_dev = pci_find_slot(b, PCI_DEVFN(d,f));
if (pci_dev) {
dev->pdev = pci_dev;
#ifdef __alpha__
dev->hose = pci_dev->sysdata;
#endif
}
} while(0);
return 0;
}
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3)
return DRM_ERR(EINVAL);
domain = bus >> 8;
bus &= 0xff;
if ((domain != dev->pci_domain) ||
(bus != dev->pci_bus) ||
(slot != dev->pci_slot) ||
(func != dev->pci_func))
return -EINVAL;
return 0;
}
static int
DRM(set_busid)(drm_device_t *dev)
{
if (dev->unique != NULL)
return EBUSY;
dev->unique_len = 20;
dev->unique = DRM(alloc)(dev->unique_len + 1, DRM_MEM_DRIVER);
if (dev->unique == NULL)
return ENOMEM;
snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
return 0;
}
...@@ -363,3 +291,47 @@ int DRM(getstats)( struct inode *inode, struct file *filp, ...@@ -363,3 +291,47 @@ int DRM(getstats)( struct inode *inode, struct file *filp,
return -EFAULT; return -EFAULT;
return 0; return 0;
} }
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 2
int DRM(setversion)(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_set_version_t sv;
drm_set_version_t retv;
int if_version;
DRM_COPY_FROM_USER_IOCTL(sv, (drm_set_version_t *)data, sizeof(sv));
retv.drm_di_major = DRM_IF_MAJOR;
retv.drm_di_minor = DRM_IF_MINOR;
retv.drm_dd_major = DRIVER_MAJOR;
retv.drm_dd_minor = DRIVER_MINOR;
DRM_COPY_TO_USER_IOCTL((drm_set_version_t *)data, retv, sizeof(sv));
if (sv.drm_di_major != -1) {
if (sv.drm_di_major != DRM_IF_MAJOR ||
sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
return EINVAL;
if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_dd_minor);
dev->if_version = DRM_MAX(if_version, dev->if_version);
if (sv.drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
*/
DRM(set_busid)(dev);
}
}
if (sv.drm_dd_major != -1) {
if (sv.drm_dd_major != DRIVER_MAJOR ||
sv.drm_dd_minor < 0 || sv.drm_dd_minor > DRIVER_MINOR)
return EINVAL;
#ifdef DRIVER_SETVERSION
DRIVER_SETVERSION(dev, &sv);
#endif
}
return 0;
}
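For reference, a hedged userspace sketch of how a client might use the new ioctl; fd is assumed to be an already-open DRM device node and error handling is omitted:
drm_set_version_t sv = {
	.drm_di_major = 1,	/* request DRM interface 1.x */
	.drm_di_minor = 1,	/* 1.1 ties the DRM to its PCI device */
	.drm_dd_major = -1,	/* -1 leaves the driver version untouched */
	.drm_dd_minor = -1,
};
/* The kernel copies the versions it supports back before validating the
 * request, so sv can be inspected even on failure. */
if (ioctl(fd, DRM_IOCTL_SET_VERSION, &sv) != 0)
	/* fall back to the pre-1.1 setunique path */ ;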
/**
* \file drm_irq.h
* IRQ support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
#include <linux/interrupt.h> /* For task queue support */
#ifndef __HAVE_SHARED_IRQ
#define __HAVE_SHARED_IRQ 0
#endif
#if __HAVE_SHARED_IRQ
#define DRM_IRQ_TYPE SA_SHIRQ
#else
#define DRM_IRQ_TYPE 0
#endif
/**
* Get interrupt from bus id.
*
* \param inode device inode.
* \param filp file pointer.
* \param cmd command.
* \param arg user argument, pointing to a drm_irq_busid structure.
* \return zero on success or a negative number on failure.
*
* Finds the PCI device with the specified bus id and gets its IRQ number.
* This IOCTL is deprecated, and will now return EINVAL for any busid not equal
* to that of the device that this DRM instance attached to.
*/
int DRM(irq_by_busid)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_irq_busid_t p;
if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p)))
return -EFAULT;
if ((p.busnum >> 8) != dev->pci_domain ||
(p.busnum & 0xff) != dev->pci_bus ||
p.devnum != dev->pci_slot ||
p.funcnum != dev->pci_func)
return -EINVAL;
p.irq = dev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
p.busnum, p.devnum, p.funcnum, p.irq);
if (copy_to_user((drm_irq_busid_t *)arg, &p, sizeof(p)))
return -EFAULT;
return 0;
}
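For reference, a hedged userspace sketch of the corresponding call; the busnum/devnum/funcnum values are placeholders and fd is assumed to be an open DRM device node:
drm_irq_busid_t p = {
	.busnum  = (0 << 8) | 1,	/* domain in the high byte, bus below */
	.devnum  = 0,
	.funcnum = 0,
};
/* After this change the call only succeeds when the busid matches the
 * device this DRM instance is attached to. */
if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p) == 0)
	printf("IRQ %d\n", p.irq);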
#if __HAVE_IRQ
/**
* Install IRQ handler.
*
* \param dev DRM device.
* \param irq IRQ number.
*
* Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
* \c DRM(driver_irq_preinstall)() and \c DRM(driver_irq_postinstall)() functions
* before and after the installation.
*/
int DRM(irq_install)( drm_device_t *dev )
{
int ret;
if ( dev->irq == 0 )
return -EINVAL;
down( &dev->struct_sem );
/* Driver must have been initialized */
if ( !dev->dev_private ) {
up( &dev->struct_sem );
return -EINVAL;
}
if ( dev->irq_enabled ) {
up( &dev->struct_sem );
return -EBUSY;
}
dev->irq_enabled = 1;
up( &dev->struct_sem );
DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );
#if __HAVE_DMA
dev->dma->next_buffer = NULL;
dev->dma->next_queue = NULL;
dev->dma->this_buffer = NULL;
#endif
#if __HAVE_IRQ_BH
INIT_WORK(&dev->work, DRM(irq_immediate_bh), dev);
#endif
#if __HAVE_VBL_IRQ
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init( &dev->vbl_lock );
INIT_LIST_HEAD( &dev->vbl_sigs.head );
dev->vbl_pending = 0;
#endif
/* Before installing handler */
DRM(driver_irq_preinstall)(dev);
/* Install handler */
ret = request_irq( dev->irq, DRM(irq_handler),
DRM_IRQ_TYPE, dev->devname, dev );
if ( ret < 0 ) {
down( &dev->struct_sem );
dev->irq_enabled = 0;
up( &dev->struct_sem );
return ret;
}
/* After installing handler */
DRM(driver_irq_postinstall)(dev);
return 0;
}
/**
* Uninstall the IRQ handler.
*
* \param dev DRM device.
*
* Calls the driver's \c DRM(driver_irq_uninstall)() function, and stops the irq.
*/
int DRM(irq_uninstall)( drm_device_t *dev )
{
int irq_enabled;
down( &dev->struct_sem );
irq_enabled = dev->irq_enabled;
dev->irq_enabled = 0;
up( &dev->struct_sem );
if ( !irq_enabled )
return -EINVAL;
DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );
DRM(driver_irq_uninstall)( dev );
free_irq( dev->irq, dev );
return 0;
}
/**
* IRQ control ioctl.
*
* \param inode device inode.
* \param filp file pointer.
* \param cmd command.
* \param arg user argument, pointing to a drm_control structure.
* \return zero on success or a negative number on failure.
*
* Calls irq_install() or irq_uninstall() according to \p arg.
*/
int DRM(control)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_control_t ctl;
if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
return -EFAULT;
switch ( ctl.func ) {
case DRM_INST_HANDLER:
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl.irq != dev->irq)
return -EINVAL;
return DRM(irq_install)( dev );
case DRM_UNINST_HANDLER:
return DRM(irq_uninstall)( dev );
default:
return -EINVAL;
}
}
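For reference, a hedged userspace sketch of installing the handler through this ioctl; it assumes interface 1.2 was selected via DRM_IOCTL_SET_VERSION (so the irq field is ignored) and that fd is an open DRM device node:
drm_control_t ctl = {
	.func = DRM_INST_HANDLER,
	.irq  = 0,	/* ignored once if_version >= 1.2 */
};
if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl) != 0)
	perror("DRM_IOCTL_CONTROL");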
#if __HAVE_VBL_IRQ
/**
* Wait for VBLANK.
*
* \param inode device inode.
* \param filp file pointer.
* \param cmd command.
* \param data user argument, pointing to a drm_wait_vblank structure.
* \return zero on success or a negative number on failure.
*
* Verifies the IRQ is installed.
*
* If a signal is requested checks if this task has already scheduled the same signal
* for the same vblank sequence number - nothing to be done in
* that case. If the number of tasks waiting for the interrupt exceeds 100 the
* function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
* task.
*
* If a signal is not requested, then calls vblank_wait().
*/
int DRM(wait_vblank)( DRM_IOCTL_ARGS )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_wait_vblank_t vblwait;
struct timeval now;
int ret = 0;
unsigned int flags;
if (!dev->irq)
return -EINVAL;
DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
sizeof(vblwait) );
switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
case _DRM_VBLANK_RELATIVE:
vblwait.request.sequence += atomic_read( &dev->vbl_received );
vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
case _DRM_VBLANK_ABSOLUTE:
break;
default:
return -EINVAL;
}
flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
if ( flags & _DRM_VBLANK_SIGNAL ) {
unsigned long irqflags;
drm_vbl_sig_t *vbl_sig;
vblwait.reply.sequence = atomic_read( &dev->vbl_received );
spin_lock_irqsave( &dev->vbl_lock, irqflags );
/* Check if this task has already scheduled the same signal
* for the same vblank sequence number; nothing to be done in
* that case
*/
list_for_each_entry( vbl_sig, &dev->vbl_sigs.head, head ) {
if (vbl_sig->sequence == vblwait.request.sequence
&& vbl_sig->info.si_signo == vblwait.request.signal
&& vbl_sig->task == current)
{
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
goto done;
}
}
if ( dev->vbl_pending >= 100 ) {
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
return -EBUSY;
}
dev->vbl_pending++;
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
if ( !( vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) ) ) ) {
return -ENOMEM;
}
memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );
vbl_sig->sequence = vblwait.request.sequence;
vbl_sig->info.si_signo = vblwait.request.signal;
vbl_sig->task = current;
spin_lock_irqsave( &dev->vbl_lock, irqflags );
list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
} else {
ret = DRM(vblank_wait)( dev, &vblwait.request.sequence );
do_gettimeofday( &now );
vblwait.reply.tval_sec = now.tv_sec;
vblwait.reply.tval_usec = now.tv_usec;
}
done:
DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
sizeof(vblwait) );
return ret;
}
/**
* Send the VBLANK signals.
*
* \param dev DRM device.
*
* Sends a signal for each task in drm_device::vbl_sigs and empties the list.
*
* If a signal is not requested, then calls vblank_wait().
*/
void DRM(vbl_send_signals)( drm_device_t *dev )
{
struct list_head *list, *tmp;
drm_vbl_sig_t *vbl_sig;
unsigned int vbl_seq = atomic_read( &dev->vbl_received );
unsigned long flags;
spin_lock_irqsave( &dev->vbl_lock, flags );
list_for_each_safe( list, tmp, &dev->vbl_sigs.head ) {
vbl_sig = list_entry( list, drm_vbl_sig_t, head );
if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
list_del( list );
DRM_FREE( vbl_sig, sizeof(*vbl_sig) );
dev->vbl_pending--;
}
}
spin_unlock_irqrestore( &dev->vbl_lock, flags );
}
#endif /* __HAVE_VBL_IRQ */
#endif /* __HAVE_IRQ */
...@@ -67,6 +67,7 @@ static drm_mem_stats_t DRM(mem_stats)[] = { ...@@ -67,6 +67,7 @@ static drm_mem_stats_t DRM(mem_stats)[] = {
[DRM_MEM_TOTALAGP] = { "totalagp" }, [DRM_MEM_TOTALAGP] = { "totalagp" },
[DRM_MEM_BOUNDAGP] = { "boundagp" }, [DRM_MEM_BOUNDAGP] = { "boundagp" },
[DRM_MEM_CTXBITMAP] = { "ctxbitmap"}, [DRM_MEM_CTXBITMAP] = { "ctxbitmap"},
[DRM_MEM_CTXLIST] = { "ctxlist" },
[DRM_MEM_STUB] = { "stub" }, [DRM_MEM_STUB] = { "stub" },
{ NULL, 0, } /* Last entry must be null */ { NULL, 0, } /* Last entry must be null */
}; };
......
...@@ -62,8 +62,12 @@ ...@@ -62,8 +62,12 @@
verify_area( VERIFY_READ, uaddr, size ) verify_area( VERIFY_READ, uaddr, size )
#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \ #define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_from_user(arg1, arg2, arg3) __copy_from_user(arg1, arg2, arg3)
#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_to_user(arg1, arg2, arg3)
#define DRM_GET_USER_UNCHECKED(val, uaddr) \ #define DRM_GET_USER_UNCHECKED(val, uaddr) \
__get_user(val, uaddr) __get_user(val, uaddr)
#define DRM_PUT_USER_UNCHECKED(uaddr, val) \
__put_user(val, uaddr)
/** 'malloc' without the overhead of DRM(alloc)() */ /** 'malloc' without the overhead of DRM(alloc)() */
...@@ -71,6 +75,8 @@ ...@@ -71,6 +75,8 @@
/** 'free' without the overhead of DRM(free)() */ /** 'free' without the overhead of DRM(free)() */
#define DRM_FREE(x,size) kfree(x) #define DRM_FREE(x,size) kfree(x)
#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data
/** /**
* Get the pointer to the SAREA. * Get the pointer to the SAREA.
* *
......
/*
This file is auto-generated from the drm_pciids.txt in the DRM CVS
Please contact dri-devel@lists.sf.net to add new cards to this list
*/
#define radeon_PCI_IDS \
{0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x514A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x514B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x514E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x514F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5963, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5968, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
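For context, a sketch of how one of these macros would typically be consumed by a driver; the table name below is hypothetical, and the seven initializers per row line up with the fields of struct pci_device_id:
#include <linux/pci.h>

/* Hypothetical use of the generated list in a driver's device table. */
static struct pci_device_id radeon_pciidlist[] = {
	radeon_PCI_IDS		/* expands to one initializer per supported chip */
};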
#define r128_PCI_IDS \
{0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
#define mga_PCI_IDS \
{0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
#define mach64_PCI_IDS \
{0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
#define sisdrv_PCI_IDS \
{0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
#define tdfx_PCI_IDS \
{0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
#define viadrv_PCI_IDS \
{0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
#define i810_PCI_IDS \
{0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
#define i830_PCI_IDS \
{0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
#define gamma_PCI_IDS \
{0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
#define savage_PCI_IDS \
{0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
#define ffb_PCI_IDS \
{0, 0, 0}
...@@ -32,9 +32,23 @@ ...@@ -32,9 +32,23 @@
#ifndef _DRM_SAREA_H_ #ifndef _DRM_SAREA_H_
#define _DRM_SAREA_H_ #define _DRM_SAREA_H_
#include "drm.h"
/* SAREA area needs to be at least a page */
#if defined(__alpha__)
#define SAREA_MAX 0x2000
#elif defined(__ia64__)
#define SAREA_MAX 0x10000 /* 64kB */
#else
/* Intel 830M driver needs at least 8k SAREA */
#define SAREA_MAX 0x2000
#endif
/** Maximum number of drawables in the SAREA */ /** Maximum number of drawables in the SAREA */
#define SAREA_MAX_DRAWABLES 256 #define SAREA_MAX_DRAWABLES 256
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
/** SAREA drawable */ /** SAREA drawable */
typedef struct drm_sarea_drawable { typedef struct drm_sarea_drawable {
unsigned int stamp; unsigned int stamp;
......
...@@ -209,8 +209,8 @@ int DRM(stub_register)(const char *name, struct file_operations *fops, ...@@ -209,8 +209,8 @@ int DRM(stub_register)(const char *name, struct file_operations *fops,
ret2 = DRM(stub_info).info_register(name, fops, dev); ret2 = DRM(stub_info).info_register(name, fops, dev);
if (ret2) { if (ret2) {
if (!ret1) { if (!ret1) {
unregister_chrdev(DRM_MAJOR, "drm"); unregister_chrdev(DRM_MAJOR, "drm");
class_simple_destroy(drm_class); class_simple_destroy(drm_class);
} }
if (!i) if (!i)
inter_module_unregister("drm"); inter_module_unregister("drm");
......
...@@ -35,48 +35,19 @@ ...@@ -35,48 +35,19 @@
#include "drmP.h" #include "drmP.h"
/** AGP virtual memory operations */
struct vm_operations_struct DRM(vm_ops) = {
.nopage = DRM(vm_nopage),
.open = DRM(vm_open),
.close = DRM(vm_close),
};
/** Shared virtual memory operations */
struct vm_operations_struct DRM(vm_shm_ops) = {
.nopage = DRM(vm_shm_nopage),
.open = DRM(vm_open),
.close = DRM(vm_shm_close),
};
/** DMA virtual memory operations */
struct vm_operations_struct DRM(vm_dma_ops) = {
.nopage = DRM(vm_dma_nopage),
.open = DRM(vm_open),
.close = DRM(vm_close),
};
/** Scatter-gather virtual memory operations */
struct vm_operations_struct DRM(vm_sg_ops) = {
.nopage = DRM(vm_sg_nopage),
.open = DRM(vm_open),
.close = DRM(vm_close),
};
/** /**
* \c nopage method for AGP virtual memory. * \c nopage method for AGP virtual memory.
* *
* \param vma virtual memory area. * \param vma virtual memory area.
* \param address access address. * \param address access address.
* \param write_access sharing.
* \return pointer to the page structure. * \return pointer to the page structure.
* *
* Find the right map and if it's AGP memory find the real physical page to * Find the right map and if it's AGP memory find the real physical page to
* map, get the page, increment the use count and return it. * map, get the page, increment the use count and return it.
*/ */
struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int *type)
static __inline__ struct page *DRM(do_vm_nopage)(struct vm_area_struct *vma,
unsigned long address)
{
#if __REALLY_HAVE_AGP #if __REALLY_HAVE_AGP
drm_file_t *priv = vma->vm_file->private_data; drm_file_t *priv = vma->vm_file->private_data;
...@@ -133,8 +104,6 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma, ...@@ -133,8 +104,6 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
baddr, __va(agpmem->memory->memory[offset]), offset, baddr, __va(agpmem->memory->memory[offset]), offset,
atomic_read(&page->count)); atomic_read(&page->count));
if (type)
*type = VM_FAULT_MINOR;
return page; return page;
} }
vm_nopage_error: vm_nopage_error:
...@@ -148,15 +117,13 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma, ...@@ -148,15 +117,13 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
* *
* \param vma virtual memory area. * \param vma virtual memory area.
* \param address access address. * \param address access address.
* \param write_access sharing.
* \return pointer to the page structure. * \return pointer to the page structure.
* *
* Get the the mapping, find the real physical page to map, get the page, and * Get the the mapping, find the real physical page to map, get the page, and
* return it. * return it.
*/ */
struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int *type)
static __inline__ struct page *DRM(do_vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address)
{
drm_map_t *map = (drm_map_t *)vma->vm_private_data; drm_map_t *map = (drm_map_t *)vma->vm_private_data;
unsigned long offset; unsigned long offset;
...@@ -172,8 +139,6 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma, ...@@ -172,8 +139,6 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
if (!page) if (!page)
return NOPAGE_OOM; return NOPAGE_OOM;
get_page(page); get_page(page);
if (type)
*type = VM_FAULT_MINOR;
DRM_DEBUG("shm_nopage 0x%lx\n", address); DRM_DEBUG("shm_nopage 0x%lx\n", address);
return page; return page;
...@@ -265,14 +230,12 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma) ...@@ -265,14 +230,12 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
* *
* \param vma virtual memory area. * \param vma virtual memory area.
* \param address access address. * \param address access address.
* \param write_access sharing.
* \return pointer to the page structure. * \return pointer to the page structure.
* *
* Determine the page number from the page offset and get it from drm_device_dma::pagelist. * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/ */
struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma, static __inline__ struct page *DRM(do_vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address, unsigned long address)
int *type)
{ {
drm_file_t *priv = vma->vm_file->private_data; drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev; drm_device_t *dev = priv->dev;
...@@ -291,8 +254,6 @@ struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma, ...@@ -291,8 +254,6 @@ struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
(offset & (~PAGE_MASK)))); (offset & (~PAGE_MASK))));
get_page(page); get_page(page);
if (type)
*type = VM_FAULT_MINOR;
DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr); DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
return page; return page;
...@@ -303,14 +264,12 @@ struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma, ...@@ -303,14 +264,12 @@ struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
* *
* \param vma virtual memory area. * \param vma virtual memory area.
* \param address access address. * \param address access address.
* \param write_access sharing.
* \return pointer to the page structure. * \return pointer to the page structure.
* *
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/ */
struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma, static __inline__ struct page *DRM(do_vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address, unsigned long address)
int *type)
{ {
drm_map_t *map = (drm_map_t *)vma->vm_private_data; drm_map_t *map = (drm_map_t *)vma->vm_private_data;
drm_file_t *priv = vma->vm_file->private_data; drm_file_t *priv = vma->vm_file->private_data;
...@@ -331,12 +290,99 @@ struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma, ...@@ -331,12 +290,99 @@ struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
page = entry->pagelist[page_offset]; page = entry->pagelist[page_offset];
get_page(page); get_page(page);
if (type)
*type = VM_FAULT_MINOR;
return page; return page;
} }
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
static struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int *type) {
if (type) *type = VM_FAULT_MINOR;
return DRM(do_vm_nopage)(vma, address);
}
static struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int *type) {
if (type) *type = VM_FAULT_MINOR;
return DRM(do_vm_shm_nopage)(vma, address);
}
static struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int *type) {
if (type) *type = VM_FAULT_MINOR;
return DRM(do_vm_dma_nopage)(vma, address);
}
static struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int *type) {
if (type) *type = VM_FAULT_MINOR;
return DRM(do_vm_sg_nopage)(vma, address);
}
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
static struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused) {
return DRM(do_vm_nopage)(vma, address);
}
static struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused) {
return DRM(do_vm_shm_nopage)(vma, address);
}
static struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused) {
return DRM(do_vm_dma_nopage)(vma, address);
}
static struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int unused) {
return DRM(do_vm_sg_nopage)(vma, address);
}
#endif
/** AGP virtual memory operations */
static struct vm_operations_struct DRM(vm_ops) = {
.nopage = DRM(vm_nopage),
.open = DRM(vm_open),
.close = DRM(vm_close),
};
/** Shared virtual memory operations */
static struct vm_operations_struct DRM(vm_shm_ops) = {
.nopage = DRM(vm_shm_nopage),
.open = DRM(vm_open),
.close = DRM(vm_shm_close),
};
/** DMA virtual memory operations */
static struct vm_operations_struct DRM(vm_dma_ops) = {
.nopage = DRM(vm_dma_nopage),
.open = DRM(vm_open),
.close = DRM(vm_close),
};
/** Scatter-gather virtual memory operations */
static struct vm_operations_struct DRM(vm_sg_ops) = {
.nopage = DRM(vm_sg_nopage),
.open = DRM(vm_open),
.close = DRM(vm_close),
};
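
The four tables above become static here, presumably because nothing outside this file needs them once the nopage wrappers live alongside them. As a simplified sketch (not part of this commit, with the helper name invented for illustration), the mmap handler picks one of them based on the map type roughly like this:

static void drm_pick_vm_ops_example( drm_map_t *map, struct vm_area_struct *vma )
{
	/* Mirrors the selection DRM(mmap) performs; the real handler also
	 * covers register/frame-buffer maps and the DMA buffer case. */
	switch ( map->type ) {
	case _DRM_AGP:
		vma->vm_ops = &DRM(vm_ops);	/* backed by do_vm_nopage */
		break;
	case _DRM_SHM:
		vma->vm_ops = &DRM(vm_shm_ops);	/* backed by do_vm_shm_nopage */
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &DRM(vm_sg_ops);	/* backed by do_vm_sg_nopage */
		break;
	default:
		break;
	}
}
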
/** /**
* \c open method for shared virtual memory. * \c open method for shared virtual memory.
* *
......
...@@ -13,3 +13,4 @@ ...@@ -13,3 +13,4 @@
#define __HAVE_KERNEL_CTX_SWITCH 1 #define __HAVE_KERNEL_CTX_SWITCH 1
#define __HAVE_RELEASE 1 #define __HAVE_RELEASE 1
#endif #endif
...@@ -104,8 +104,8 @@ ...@@ -104,8 +104,8 @@
return 0; \ return 0; \
} while (0) } while (0)
#define __HAVE_DMA_IRQ 1 #define __HAVE_IRQ 1
#define __HAVE_DMA_IRQ_BH 1 #define __HAVE_IRQ_BH 1
#define DRIVER_AGP_BUFFERS_MAP( dev ) \ #define DRIVER_AGP_BUFFERS_MAP( dev ) \
((drm_gamma_private_t *)((dev)->dev_private))->buffers ((drm_gamma_private_t *)((dev)->dev_private))->buffers
......
...@@ -116,7 +116,7 @@ static inline int gamma_dma_is_ready(drm_device_t *dev) ...@@ -116,7 +116,7 @@ static inline int gamma_dma_is_ready(drm_device_t *dev)
return (!GAMMA_READ(GAMMA_DMACOUNT)); return (!GAMMA_READ(GAMMA_DMACOUNT));
} }
irqreturn_t gamma_dma_service( DRM_IRQ_ARGS ) irqreturn_t gamma_irq_handler( DRM_IRQ_ARGS )
{ {
drm_device_t *dev = (drm_device_t *)arg; drm_device_t *dev = (drm_device_t *)arg;
drm_device_dma_t *dma = dev->dma; drm_device_dma_t *dma = dev->dma;
...@@ -262,7 +262,7 @@ static void gamma_dma_timer_bh(unsigned long dev) ...@@ -262,7 +262,7 @@ static void gamma_dma_timer_bh(unsigned long dev)
gamma_dma_schedule((drm_device_t *)dev, 0); gamma_dma_schedule((drm_device_t *)dev, 0);
} }
void gamma_dma_immediate_bh(void *dev) void gamma_irq_immediate_bh(void *dev)
{ {
gamma_dma_schedule(dev, 0); gamma_dma_schedule(dev, 0);
} }
...@@ -656,12 +656,12 @@ int gamma_do_cleanup_dma( drm_device_t *dev ) ...@@ -656,12 +656,12 @@ int gamma_do_cleanup_dma( drm_device_t *dev )
{ {
DRM_DEBUG( "%s\n", __FUNCTION__ ); DRM_DEBUG( "%s\n", __FUNCTION__ );
#if _HAVE_DMA_IRQ #if __HAVE_IRQ
/* Make sure interrupts are disabled here because the uninstall ioctl /* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private * may not have been called from userspace and after dev_private
* is freed, it's too late. * is freed, it's too late.
*/ */
if ( dev->irq ) DRM(irq_uninstall)(dev); if ( dev->irq_enabled ) DRM(irq_uninstall)(dev);
#endif #endif
if ( dev->dev_private ) { if ( dev->dev_private ) {
......
...@@ -48,6 +48,7 @@ ...@@ -48,6 +48,7 @@
#include "drm_fops.h" #include "drm_fops.h"
#include "drm_init.h" #include "drm_init.h"
#include "drm_ioctl.h" #include "drm_ioctl.h"
#include "drm_irq.h"
#include "gamma_lists.h" /* NOTE */ #include "gamma_lists.h" /* NOTE */
#include "drm_lock.h" #include "drm_lock.h"
#include "gamma_lock.h" /* NOTE */ #include "gamma_lock.h" /* NOTE */
......
...@@ -77,7 +77,6 @@ ...@@ -77,7 +77,6 @@
[DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \ [DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_FLIP)] = { i810_flip_bufs, 1, 0 } [DRM_IOCTL_NR(DRM_IOCTL_I810_FLIP)] = { i810_flip_bufs, 1, 0 }
#define __HAVE_COUNTERS 4 #define __HAVE_COUNTERS 4
#define __HAVE_COUNTER6 _DRM_STAT_IRQ #define __HAVE_COUNTER6 _DRM_STAT_IRQ
...@@ -112,7 +111,7 @@ ...@@ -112,7 +111,7 @@
* a noop stub is generated for compatibility. * a noop stub is generated for compatibility.
*/ */
/* XXX: Add vblank support? */ /* XXX: Add vblank support? */
#define __HAVE_DMA_IRQ 0 #define __HAVE_IRQ 0
/* Buffer customization: /* Buffer customization:
*/ */
......
...@@ -232,12 +232,12 @@ int i810_dma_cleanup(drm_device_t *dev) ...@@ -232,12 +232,12 @@ int i810_dma_cleanup(drm_device_t *dev)
{ {
drm_device_dma_t *dma = dev->dma; drm_device_dma_t *dma = dev->dma;
#if _HAVE_DMA_IRQ #if __HAVE_IRQ
/* Make sure interrupts are disabled here because the uninstall ioctl /* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private * may not have been called from userspace and after dev_private
* is freed, it's too late. * is freed, it's too late.
*/ */
if (dev->irq) DRM(irq_uninstall)(dev); if ( dev->irq_enabled ) DRM(irq_uninstall)(dev);
#endif #endif
if (dev->dev_private) { if (dev->dev_private) {
......
...@@ -115,10 +115,10 @@ ...@@ -115,10 +115,10 @@
#define USE_IRQS 0 #define USE_IRQS 0
#if USE_IRQS #if USE_IRQS
#define __HAVE_DMA_IRQ 1 #define __HAVE_IRQ 1
#define __HAVE_SHARED_IRQ 1 #define __HAVE_SHARED_IRQ 1
#else #else
#define __HAVE_DMA_IRQ 0 #define __HAVE_IRQ 0
#endif #endif
......
...@@ -232,12 +232,12 @@ int i830_dma_cleanup(drm_device_t *dev) ...@@ -232,12 +232,12 @@ int i830_dma_cleanup(drm_device_t *dev)
{ {
drm_device_dma_t *dma = dev->dma; drm_device_dma_t *dma = dev->dma;
#if _HAVE_DMA_IRQ #if __HAVE_IRQ
/* Make sure interrupts are disabled here because the uninstall ioctl /* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private * may not have been called from userspace and after dev_private
* is freed, it's too late. * is freed, it's too late.
*/ */
if (dev->irq) DRM(irq_uninstall)(dev); if ( dev->irq_enabled ) DRM(irq_uninstall)(dev);
#endif #endif
if (dev->dev_private) { if (dev->dev_private) {
...@@ -1540,7 +1540,7 @@ int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd, ...@@ -1540,7 +1540,7 @@ int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd,
switch( param.param ) { switch( param.param ) {
case I830_PARAM_IRQ_ACTIVE: case I830_PARAM_IRQ_ACTIVE:
value = dev->irq ? 1 : 0; value = dev->irq_enabled;
break; break;
default: default:
return -EINVAL; return -EINVAL;
......
...@@ -50,6 +50,7 @@ ...@@ -50,6 +50,7 @@
#include "drm_fops.h" #include "drm_fops.h"
#include "drm_init.h" #include "drm_init.h"
#include "drm_ioctl.h" #include "drm_ioctl.h"
#include "drm_irq.h"
#include "drm_lock.h" #include "drm_lock.h"
#include "drm_memory.h" #include "drm_memory.h"
#include "drm_proc.h" #include "drm_proc.h"
......
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
#include <linux/delay.h> #include <linux/delay.h>
irqreturn_t DRM(dma_service)( DRM_IRQ_ARGS ) irqreturn_t DRM(irq_handler)( DRM_IRQ_ARGS )
{ {
drm_device_t *dev = (drm_device_t *)arg; drm_device_t *dev = (drm_device_t *)arg;
drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
......
...@@ -78,7 +78,7 @@ ...@@ -78,7 +78,7 @@
/* DMA customization: /* DMA customization:
*/ */
#define __HAVE_DMA 1 #define __HAVE_DMA 1
#define __HAVE_DMA_IRQ 1 #define __HAVE_IRQ 1
#define __HAVE_VBL_IRQ 1 #define __HAVE_VBL_IRQ 1
#define __HAVE_SHARED_IRQ 1 #define __HAVE_SHARED_IRQ 1
......
...@@ -500,14 +500,6 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) ...@@ -500,14 +500,6 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
if(!dev_priv->fb) {
DRM_ERROR( "failed to find framebuffer!\n" );
/* Assign dev_private so we can do cleanup. */
dev->dev_private = (void *)dev_priv;
mga_do_cleanup_dma( dev );
return DRM_ERR(EINVAL);
}
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset ); DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
if(!dev_priv->mmio) { if(!dev_priv->mmio) {
DRM_ERROR( "failed to find mmio region!\n" ); DRM_ERROR( "failed to find mmio region!\n" );
...@@ -639,12 +631,12 @@ int mga_do_cleanup_dma( drm_device_t *dev ) ...@@ -639,12 +631,12 @@ int mga_do_cleanup_dma( drm_device_t *dev )
{ {
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
#if _HAVE_DMA_IRQ #if __HAVE_IRQ
/* Make sure interrupts are disabled here because the uninstall ioctl /* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private * may not have been called from userspace and after dev_private
* is freed, it's too late. * is freed, it's too late.
*/ */
if ( dev->irq ) DRM(irq_uninstall)(dev); if ( dev->irq_enabled ) DRM(irq_uninstall)(dev);
#endif #endif
if ( dev->dev_private ) { if ( dev->dev_private ) {
......
...@@ -117,6 +117,8 @@ ...@@ -117,6 +117,8 @@
#define MGA_NR_TEX_REGIONS 16 #define MGA_NR_TEX_REGIONS 16
#define MGA_LOG_MIN_TEX_REGION_SIZE 16 #define MGA_LOG_MIN_TEX_REGION_SIZE 16
#define DRM_MGA_IDLE_RETRY 2048
#endif /* __MGA_SAREA_DEFINES__ */ #endif /* __MGA_SAREA_DEFINES__ */
...@@ -230,16 +232,27 @@ typedef struct _drm_mga_sarea { ...@@ -230,16 +232,27 @@ typedef struct _drm_mga_sarea {
/* MGA specific ioctls /* MGA specific ioctls
* The device specific ioctl range is 0x40 to 0x79. * The device specific ioctl range is 0x40 to 0x79.
*/ */
#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) #define DRM_MGA_INIT 0x00
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x41, drm_lock_t) #define DRM_MGA_FLUSH 0x01
#define DRM_IOCTL_MGA_RESET DRM_IO( 0x42) #define DRM_MGA_RESET 0x02
#define DRM_IOCTL_MGA_SWAP DRM_IO( 0x43) #define DRM_MGA_SWAP 0x03
#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x44, drm_mga_clear_t) #define DRM_MGA_CLEAR 0x04
#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x45, drm_mga_vertex_t) #define DRM_MGA_VERTEX 0x05
#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) #define DRM_MGA_INDICES 0x06
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x47, drm_mga_iload_t) #define DRM_MGA_ILOAD 0x07
#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x48, drm_mga_blit_t) #define DRM_MGA_BLIT 0x08
#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(0x49, drm_mga_getparam_t) #define DRM_MGA_GETPARAM 0x09
#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
#define DRM_IOCTL_MGA_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
#define DRM_IOCTL_MGA_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
typedef struct _drm_mga_warp_index { typedef struct _drm_mga_warp_index {
int installed; int installed;
...@@ -330,7 +343,7 @@ typedef struct _drm_mga_blit { ...@@ -330,7 +343,7 @@ typedef struct _drm_mga_blit {
typedef struct drm_mga_getparam { typedef struct drm_mga_getparam {
int param; int param;
int *value; void *value;
} drm_mga_getparam_t; } drm_mga_getparam_t;
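
The value field is now a void * rather than an int *. A minimal userspace sketch of calling the MGA getparam ioctl, assuming an already opened and authenticated DRM fd and using MGA_PARAM_IRQ_NR only as an example parameter name from the existing header:

#include <sys/ioctl.h>
#include "drm.h"	/* include paths depend on the installation */
#include "mga_drm.h"

int mga_get_irq_nr_example( int fd )
{
	drm_mga_getparam_t gp;
	int value = 0;

	gp.param = MGA_PARAM_IRQ_NR;	/* example parameter from mga_drm.h */
	gp.value = &value;		/* now a void *, any object's address fits */
	if ( ioctl( fd, DRM_IOCTL_MGA_GETPARAM, &gp ) < 0 )
		return -1;
	return value;
}
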
#endif #endif
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include "drm_fops.h" #include "drm_fops.h"
#include "drm_init.h" #include "drm_init.h"
#include "drm_ioctl.h" #include "drm_ioctl.h"
#include "drm_irq.h"
#include "drm_lock.h" #include "drm_lock.h"
#include "drm_memory.h" #include "drm_memory.h"
#include "drm_proc.h" #include "drm_proc.h"
......
...@@ -91,7 +91,6 @@ typedef struct drm_mga_private { ...@@ -91,7 +91,6 @@ typedef struct drm_mga_private {
unsigned int texture_size; unsigned int texture_size;
drm_local_map_t *sarea; drm_local_map_t *sarea;
drm_local_map_t *fb;
drm_local_map_t *mmio; drm_local_map_t *mmio;
drm_local_map_t *status; drm_local_map_t *status;
drm_local_map_t *warp; drm_local_map_t *warp;
......
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
#include "mga_drm.h" #include "mga_drm.h"
#include "mga_drv.h" #include "mga_drv.h"
irqreturn_t mga_dma_service( DRM_IRQ_ARGS ) irqreturn_t mga_irq_handler( DRM_IRQ_ARGS )
{ {
drm_device_t *dev = (drm_device_t *) arg; drm_device_t *dev = (drm_device_t *) arg;
drm_mga_private_t *dev_priv = drm_mga_private_t *dev_priv =
......
...@@ -97,7 +97,7 @@ ...@@ -97,7 +97,7 @@
/* DMA customization: /* DMA customization:
*/ */
#define __HAVE_DMA 1 #define __HAVE_DMA 1
#define __HAVE_DMA_IRQ 1 #define __HAVE_IRQ 1
#define __HAVE_VBL_IRQ 1 #define __HAVE_VBL_IRQ 1
#define __HAVE_SHARED_IRQ 1 #define __HAVE_SHARED_IRQ 1
......
...@@ -212,7 +212,7 @@ int r128_do_cce_idle( drm_r128_private_t *dev_priv ) ...@@ -212,7 +212,7 @@ int r128_do_cce_idle( drm_r128_private_t *dev_priv )
int i; int i;
for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
if ( GET_RING_HEAD( &dev_priv->ring ) == dev_priv->ring.tail ) { if ( GET_RING_HEAD( dev_priv ) == dev_priv->ring.tail ) {
int pm4stat = R128_READ( R128_PM4_STAT ); int pm4stat = R128_READ( R128_PM4_STAT );
if ( ( (pm4stat & R128_PM4_FIFOCNT_MASK) >= if ( ( (pm4stat & R128_PM4_FIFOCNT_MASK) >=
dev_priv->cce_fifo_size ) && dev_priv->cce_fifo_size ) &&
...@@ -238,7 +238,8 @@ static void r128_do_cce_start( drm_r128_private_t *dev_priv ) ...@@ -238,7 +238,8 @@ static void r128_do_cce_start( drm_r128_private_t *dev_priv )
r128_do_wait_for_idle( dev_priv ); r128_do_wait_for_idle( dev_priv );
R128_WRITE( R128_PM4_BUFFER_CNTL, R128_WRITE( R128_PM4_BUFFER_CNTL,
dev_priv->cce_mode | dev_priv->ring.size_l2qw ); dev_priv->cce_mode | dev_priv->ring.size_l2qw
| R128_PM4_BUFFER_CNTL_NOUPDATE );
R128_READ( R128_PM4_BUFFER_ADDR ); /* as per the sample code */ R128_READ( R128_PM4_BUFFER_ADDR ); /* as per the sample code */
R128_WRITE( R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN ); R128_WRITE( R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN );
...@@ -253,7 +254,6 @@ static void r128_do_cce_reset( drm_r128_private_t *dev_priv ) ...@@ -253,7 +254,6 @@ static void r128_do_cce_reset( drm_r128_private_t *dev_priv )
{ {
R128_WRITE( R128_PM4_BUFFER_DL_WPTR, 0 ); R128_WRITE( R128_PM4_BUFFER_DL_WPTR, 0 );
R128_WRITE( R128_PM4_BUFFER_DL_RPTR, 0 ); R128_WRITE( R128_PM4_BUFFER_DL_RPTR, 0 );
SET_RING_HEAD( &dev_priv->ring, 0 );
dev_priv->ring.tail = 0; dev_priv->ring.tail = 0;
} }
...@@ -264,7 +264,8 @@ static void r128_do_cce_reset( drm_r128_private_t *dev_priv ) ...@@ -264,7 +264,8 @@ static void r128_do_cce_reset( drm_r128_private_t *dev_priv )
static void r128_do_cce_stop( drm_r128_private_t *dev_priv ) static void r128_do_cce_stop( drm_r128_private_t *dev_priv )
{ {
R128_WRITE( R128_PM4_MICRO_CNTL, 0 ); R128_WRITE( R128_PM4_MICRO_CNTL, 0 );
R128_WRITE( R128_PM4_BUFFER_CNTL, R128_PM4_NONPM4 ); R128_WRITE( R128_PM4_BUFFER_CNTL,
R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE );
dev_priv->cce_running = 0; dev_priv->cce_running = 0;
} }
...@@ -333,26 +334,6 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev, ...@@ -333,26 +334,6 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev,
R128_WRITE( R128_PM4_BUFFER_DL_WPTR, 0 ); R128_WRITE( R128_PM4_BUFFER_DL_WPTR, 0 );
R128_WRITE( R128_PM4_BUFFER_DL_RPTR, 0 ); R128_WRITE( R128_PM4_BUFFER_DL_RPTR, 0 );
/* DL_RPTR_ADDR is a physical address in AGP space. */
SET_RING_HEAD( &dev_priv->ring, 0 );
if ( !dev_priv->is_pci ) {
R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR,
dev_priv->ring_rptr->offset );
} else {
drm_sg_mem_t *entry = dev->sg;
unsigned long tmp_ofs, page_ofs;
tmp_ofs = dev_priv->ring_rptr->offset - dev->sg->handle;
page_ofs = tmp_ofs >> PAGE_SHIFT;
R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR,
entry->busaddr[page_ofs]);
DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n",
(unsigned long) entry->busaddr[page_ofs],
entry->handle + tmp_ofs );
}
/* Set watermark control */ /* Set watermark control */
R128_WRITE( R128_PM4_BUFFER_WM_CNTL, R128_WRITE( R128_PM4_BUFFER_WM_CNTL,
((R128_WATERMARK_L/4) << R128_WMA_SHIFT) ((R128_WATERMARK_L/4) << R128_WMA_SHIFT)
...@@ -486,13 +467,6 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) ...@@ -486,13 +467,6 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
if(!dev_priv->fb) {
DRM_ERROR("could not find framebuffer!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce( dev );
return DRM_ERR(EINVAL);
}
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset ); DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
if(!dev_priv->mmio) { if(!dev_priv->mmio) {
DRM_ERROR("could not find mmio region!\n"); DRM_ERROR("could not find mmio region!\n");
...@@ -567,9 +541,6 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) ...@@ -567,9 +541,6 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
#endif #endif
dev_priv->cce_buffers_offset = dev->sg->handle; dev_priv->cce_buffers_offset = dev->sg->handle;
dev_priv->ring.head = ((__volatile__ u32 *)
dev_priv->ring_rptr->handle);
dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle; dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle;
dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle
+ init->ring_size / sizeof(u32)); + init->ring_size / sizeof(u32));
...@@ -580,7 +551,6 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) ...@@ -580,7 +551,6 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
(dev_priv->ring.size / sizeof(u32)) - 1; (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = 128; dev_priv->ring.high_mark = 128;
dev_priv->ring.ring_rptr = dev_priv->ring_rptr;
dev_priv->sarea_priv->last_frame = 0; dev_priv->sarea_priv->last_frame = 0;
R128_WRITE( R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame ); R128_WRITE( R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame );
...@@ -589,8 +559,9 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) ...@@ -589,8 +559,9 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
R128_WRITE( R128_LAST_DISPATCH_REG, R128_WRITE( R128_LAST_DISPATCH_REG,
dev_priv->sarea_priv->last_dispatch ); dev_priv->sarea_priv->last_dispatch );
#if __REALLY_HAVE_SG #if __REALLY_HAVE_AGP
if ( dev_priv->is_pci ) { if ( dev_priv->is_pci ) {
#endif
if (!DRM(ati_pcigart_init)( dev, &dev_priv->phys_pci_gart, if (!DRM(ati_pcigart_init)( dev, &dev_priv->phys_pci_gart,
&dev_priv->bus_pci_gart) ) { &dev_priv->bus_pci_gart) ) {
DRM_ERROR( "failed to init PCI GART!\n" ); DRM_ERROR( "failed to init PCI GART!\n" );
...@@ -599,6 +570,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) ...@@ -599,6 +570,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
R128_WRITE( R128_PCI_GART_PAGE, dev_priv->bus_pci_gart ); R128_WRITE( R128_PCI_GART_PAGE, dev_priv->bus_pci_gart );
#if __REALLY_HAVE_AGP
} }
#endif #endif
...@@ -615,12 +587,12 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) ...@@ -615,12 +587,12 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
int r128_do_cleanup_cce( drm_device_t *dev ) int r128_do_cleanup_cce( drm_device_t *dev )
{ {
#if _HAVE_DMA_IRQ #if __HAVE_IRQ
/* Make sure interrupts are disabled here because the uninstall ioctl /* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private * may not have been called from userspace and after dev_private
* is freed, it's too late. * is freed, it's too late.
*/ */
if ( dev->irq ) DRM(irq_uninstall)(dev); if ( dev->irq_enabled ) DRM(irq_uninstall)(dev);
#endif #endif
if ( dev->dev_private ) { if ( dev->dev_private ) {
...@@ -901,7 +873,7 @@ int r128_wait_ring( drm_r128_private_t *dev_priv, int n ) ...@@ -901,7 +873,7 @@ int r128_wait_ring( drm_r128_private_t *dev_priv, int n )
int i; int i;
for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
r128_update_ring_snapshot( ring ); r128_update_ring_snapshot( dev_priv );
if ( ring->space >= n ) if ( ring->space >= n )
return 0; return 0;
DRM_UDELAY( 1 ); DRM_UDELAY( 1 );
......
...@@ -176,24 +176,47 @@ typedef struct drm_r128_sarea { ...@@ -176,24 +176,47 @@ typedef struct drm_r128_sarea {
/* Rage 128 specific ioctls /* Rage 128 specific ioctls
* The device specific ioctl range is 0x40 to 0x79. * The device specific ioctl range is 0x40 to 0x79.
*/ */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) #define DRM_R128_INIT 0x00
#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41) #define DRM_R128_CCE_START 0x01
#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t) #define DRM_R128_CCE_STOP 0x02
#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43) #define DRM_R128_CCE_RESET 0x03
#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44) #define DRM_R128_CCE_IDLE 0x04
#define DRM_IOCTL_R128_RESET DRM_IO( 0x46) /* 0x05 not used */
#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47) #define DRM_R128_RESET 0x06
#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t) #define DRM_R128_SWAP 0x07
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t) #define DRM_R128_CLEAR 0x08
#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t) #define DRM_R128_VERTEX 0x09
#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t) #define DRM_R128_INDICES 0x0a
#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t) #define DRM_R128_BLIT 0x0b
#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t) #define DRM_R128_DEPTH 0x0c
#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t) #define DRM_R128_STIPPLE 0x0d
#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t) /* 0x0e not used */
#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( 0x51, drm_r128_clear2_t) #define DRM_R128_INDIRECT 0x0f
#define DRM_IOCTL_R128_GETPARAM DRM_IOW( 0x52, drm_r128_getparam_t) #define DRM_R128_FULLSCREEN 0x10
#define DRM_IOCTL_R128_FLIP DRM_IO( 0x53) #define DRM_R128_CLEAR2 0x11
#define DRM_R128_GETPARAM 0x12
#define DRM_R128_FLIP 0x13
#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START)
#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
/* 0x05 not used */
#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET)
#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP)
#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
/* 0x0e not used */
#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
#define DRM_IOCTL_R128_GETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
typedef struct drm_r128_init { typedef struct drm_r128_init {
enum { enum {
...@@ -316,7 +339,7 @@ typedef struct drm_r128_fullscreen { ...@@ -316,7 +339,7 @@ typedef struct drm_r128_fullscreen {
typedef struct drm_r128_getparam { typedef struct drm_r128_getparam {
int param; int param;
int *value; void *value;
} drm_r128_getparam_t; } drm_r128_getparam_t;
#endif #endif
...@@ -47,6 +47,7 @@ ...@@ -47,6 +47,7 @@
#include "drm_fops.h" #include "drm_fops.h"
#include "drm_init.h" #include "drm_init.h"
#include "drm_ioctl.h" #include "drm_ioctl.h"
#include "drm_irq.h"
#include "drm_lock.h" #include "drm_lock.h"
#include "drm_memory.h" #include "drm_memory.h"
#include "drm_proc.h" #include "drm_proc.h"
......
...@@ -34,8 +34,7 @@ ...@@ -34,8 +34,7 @@
#ifndef __R128_DRV_H__ #ifndef __R128_DRV_H__
#define __R128_DRV_H__ #define __R128_DRV_H__
#define GET_RING_HEAD(ring) DRM_READ32( (ring)->ring_rptr, 0 ) /* (ring)->head */ #define GET_RING_HEAD(dev_priv) R128_READ( R128_PM4_BUFFER_DL_RPTR )
#define SET_RING_HEAD(ring,val) DRM_WRITE32( (ring)->ring_rptr, 0, (val) ) /* (ring)->head */
typedef struct drm_r128_freelist { typedef struct drm_r128_freelist {
unsigned int age; unsigned int age;
...@@ -50,13 +49,11 @@ typedef struct drm_r128_ring_buffer { ...@@ -50,13 +49,11 @@ typedef struct drm_r128_ring_buffer {
int size; int size;
int size_l2qw; int size_l2qw;
volatile u32 *head;
u32 tail; u32 tail;
u32 tail_mask; u32 tail_mask;
int space; int space;
int high_mark; int high_mark;
drm_local_map_t *ring_rptr;
} drm_r128_ring_buffer_t; } drm_r128_ring_buffer_t;
typedef struct drm_r128_private { typedef struct drm_r128_private {
...@@ -100,7 +97,6 @@ typedef struct drm_r128_private { ...@@ -100,7 +97,6 @@ typedef struct drm_r128_private {
u32 span_pitch_offset_c; u32 span_pitch_offset_c;
drm_local_map_t *sarea; drm_local_map_t *sarea;
drm_local_map_t *fb;
drm_local_map_t *mmio; drm_local_map_t *mmio;
drm_local_map_t *cce_ring; drm_local_map_t *cce_ring;
drm_local_map_t *ring_rptr; drm_local_map_t *ring_rptr;
...@@ -132,14 +128,6 @@ extern drm_buf_t *r128_freelist_get( drm_device_t *dev ); ...@@ -132,14 +128,6 @@ extern drm_buf_t *r128_freelist_get( drm_device_t *dev );
extern int r128_wait_ring( drm_r128_private_t *dev_priv, int n ); extern int r128_wait_ring( drm_r128_private_t *dev_priv, int n );
static __inline__ void
r128_update_ring_snapshot( drm_r128_ring_buffer_t *ring )
{
ring->space = (GET_RING_HEAD( ring ) - ring->tail) * sizeof(u32);
if ( ring->space <= 0 )
ring->space += ring->size;
}
extern int r128_do_cce_idle( drm_r128_private_t *dev_priv ); extern int r128_do_cce_idle( drm_r128_private_t *dev_priv );
extern int r128_do_cleanup_cce( drm_device_t *dev ); extern int r128_do_cleanup_cce( drm_device_t *dev );
extern int r128_do_cleanup_pageflip( drm_device_t *dev ); extern int r128_do_cleanup_pageflip( drm_device_t *dev );
...@@ -279,6 +267,7 @@ extern int r128_cce_indirect( DRM_IOCTL_ARGS ); ...@@ -279,6 +267,7 @@ extern int r128_cce_indirect( DRM_IOCTL_ARGS );
# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28) # define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28)
# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28) # define R128_PM4_64BM_64VCBM_64INDBM (8 << 28)
# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28) # define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28)
# define R128_PM4_BUFFER_CNTL_NOUPDATE (1 << 27)
#define R128_PM4_BUFFER_WM_CNTL 0x0708 #define R128_PM4_BUFFER_WM_CNTL 0x0708
# define R128_WMA_SHIFT 0 # define R128_WMA_SHIFT 0
...@@ -403,6 +392,15 @@ extern int R128_READ_PLL(drm_device_t *dev, int addr); ...@@ -403,6 +392,15 @@ extern int R128_READ_PLL(drm_device_t *dev, int addr);
(pkt) | ((n) << 16)) (pkt) | ((n) << 16))
static __inline__ void
r128_update_ring_snapshot( drm_r128_private_t *dev_priv )
{
drm_r128_ring_buffer_t *ring = &dev_priv->ring;
ring->space = (GET_RING_HEAD( dev_priv ) - ring->tail) * sizeof(u32);
if ( ring->space <= 0 )
ring->space += ring->size;
}
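
A quick worked example of the snapshot above, with every number assumed purely for illustration:

/* Assume a 0x10000-byte ring, GET_RING_HEAD() returning dword index 0x0400
 * and ring->tail sitting at 0x0500:
 *   space = (0x0400 - 0x0500) * sizeof(u32) = -1024, which is <= 0,
 *   so the ring size is added back: -1024 + 0x10000 = 64512 bytes still free.
 */
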
/* ================================================================ /* ================================================================
* Misc helper macros * Misc helper macros
*/ */
...@@ -412,7 +410,7 @@ do { \ ...@@ -412,7 +410,7 @@ do { \
drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \ drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \
if ( ring->space < ring->high_mark ) { \ if ( ring->space < ring->high_mark ) { \
for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \
r128_update_ring_snapshot( ring ); \ r128_update_ring_snapshot( dev_priv ); \
if ( ring->space >= ring->high_mark ) \ if ( ring->space >= ring->high_mark ) \
goto __ring_space_done; \ goto __ring_space_done; \
DRM_UDELAY(1); \ DRM_UDELAY(1); \
...@@ -445,17 +443,10 @@ do { \ ...@@ -445,17 +443,10 @@ do { \
* Ring control * Ring control
*/ */
#if defined(__powerpc__)
#define r128_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring )
#else
#define r128_flush_write_combine() DRM_WRITEMEMORYBARRIER()
#endif
#define R128_VERBOSE 0 #define R128_VERBOSE 0
#define RING_LOCALS \ #define RING_LOCALS \
int write; unsigned int tail_mask; volatile u32 *ring; int write, _nr; unsigned int tail_mask; volatile u32 *ring;
#define BEGIN_RING( n ) do { \ #define BEGIN_RING( n ) do { \
if ( R128_VERBOSE ) { \ if ( R128_VERBOSE ) { \
...@@ -463,9 +454,10 @@ do { \ ...@@ -463,9 +454,10 @@ do { \
(n), __FUNCTION__ ); \ (n), __FUNCTION__ ); \
} \ } \
if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
COMMIT_RING(); \
r128_wait_ring( dev_priv, (n) * sizeof(u32) ); \ r128_wait_ring( dev_priv, (n) * sizeof(u32) ); \
} \ } \
dev_priv->ring.space -= (n) * sizeof(u32); \ _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \
ring = dev_priv->ring.start; \ ring = dev_priv->ring.start; \
write = dev_priv->ring.tail; \ write = dev_priv->ring.tail; \
tail_mask = dev_priv->ring.tail_mask; \ tail_mask = dev_priv->ring.tail_mask; \
...@@ -488,9 +480,23 @@ do { \ ...@@ -488,9 +480,23 @@ do { \
dev_priv->ring.start, \ dev_priv->ring.start, \
write * sizeof(u32) ); \ write * sizeof(u32) ); \
} \ } \
r128_flush_write_combine(); \ if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \
dev_priv->ring.tail = write; \ DRM_ERROR( \
R128_WRITE( R128_PM4_BUFFER_DL_WPTR, write ); \ "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
((dev_priv->ring.tail + _nr) & tail_mask), \
write, __LINE__); \
} else \
dev_priv->ring.tail = write; \
} while (0)
#define COMMIT_RING() do { \
if ( R128_VERBOSE ) { \
DRM_INFO( "COMMIT_RING() tail=0x%06x\n", \
dev_priv->ring.tail ); \
} \
DRM_MEMORYBARRIER(); \
R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail ); \
R128_READ( R128_PM4_BUFFER_DL_WPTR ); \
} while (0) } while (0)
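
With the write-pointer update split out of ADVANCE_RING(), every ioctl path now finishes with an explicit COMMIT_RING(), as the r128_state.c hunks below show. A minimal sketch of the resulting emit pattern; the packet contents are filler chosen only for illustration, and CCE_PACKET2() is assumed from the existing r128 headers:

static void r128_emit_nop_example( drm_r128_private_t *dev_priv )
{
	RING_LOCALS;

	BEGIN_RING( 2 );		/* reserve two dwords; may commit and wait first */
	OUT_RING( CCE_PACKET2() );	/* filler packet, illustration only */
	OUT_RING( CCE_PACKET2() );
	ADVANCE_RING();			/* advances the software tail only */

	COMMIT_RING();			/* memory barrier, then one PM4_BUFFER_DL_WPTR write */
}
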
#define OUT_RING( x ) do { \ #define OUT_RING( x ) do { \
......
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
#include "r128_drm.h" #include "r128_drm.h"
#include "r128_drv.h" #include "r128_drv.h"
irqreturn_t r128_dma_service( DRM_IRQ_ARGS ) irqreturn_t r128_irq_handler( DRM_IRQ_ARGS )
{ {
drm_device_t *dev = (drm_device_t *) arg; drm_device_t *dev = (drm_device_t *) arg;
drm_r128_private_t *dev_priv = drm_r128_private_t *dev_priv =
......
...@@ -45,7 +45,7 @@ static void r128_emit_clip_rects( drm_r128_private_t *dev_priv, ...@@ -45,7 +45,7 @@ static void r128_emit_clip_rects( drm_r128_private_t *dev_priv,
RING_LOCALS; RING_LOCALS;
DRM_DEBUG( " %s\n", __FUNCTION__ ); DRM_DEBUG( " %s\n", __FUNCTION__ );
BEGIN_RING( 17 ); BEGIN_RING( (count < 3? count: 3) * 5 + 2 );
if ( count >= 1 ) { if ( count >= 1 ) {
OUT_RING( CCE_PACKET0( R128_AUX1_SC_LEFT, 3 ) ); OUT_RING( CCE_PACKET0( R128_AUX1_SC_LEFT, 3 ) );
...@@ -1011,7 +1011,7 @@ static int r128_cce_dispatch_write_pixels( drm_device_t *dev, ...@@ -1011,7 +1011,7 @@ static int r128_cce_dispatch_write_pixels( drm_device_t *dev,
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
count = depth->n; count = depth->n;
if (count > 4096 || count <= 0) if (count > 4096 || count <= 0)
return -EMSGSIZE; return -EMSGSIZE;
xbuf_size = count * sizeof(*x); xbuf_size = count * sizeof(*x);
...@@ -1280,6 +1280,7 @@ int r128_cce_clear( DRM_IOCTL_ARGS ) ...@@ -1280,6 +1280,7 @@ int r128_cce_clear( DRM_IOCTL_ARGS )
sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
r128_cce_dispatch_clear( dev, &clear ); r128_cce_dispatch_clear( dev, &clear );
COMMIT_RING();
/* Make sure we restore the 3D state next time. /* Make sure we restore the 3D state next time.
*/ */
...@@ -1315,8 +1316,10 @@ int r128_do_cleanup_pageflip( drm_device_t *dev ) ...@@ -1315,8 +1316,10 @@ int r128_do_cleanup_pageflip( drm_device_t *dev )
R128_WRITE( R128_CRTC_OFFSET, dev_priv->crtc_offset ); R128_WRITE( R128_CRTC_OFFSET, dev_priv->crtc_offset );
R128_WRITE( R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl ); R128_WRITE( R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl );
if (dev_priv->current_page != 0) if (dev_priv->current_page != 0) {
r128_cce_dispatch_flip( dev ); r128_cce_dispatch_flip( dev );
COMMIT_RING();
}
dev_priv->page_flipping = 0; dev_priv->page_flipping = 0;
return 0; return 0;
...@@ -1341,6 +1344,7 @@ int r128_cce_flip( DRM_IOCTL_ARGS ) ...@@ -1341,6 +1344,7 @@ int r128_cce_flip( DRM_IOCTL_ARGS )
r128_cce_dispatch_flip( dev ); r128_cce_dispatch_flip( dev );
COMMIT_RING();
return 0; return 0;
} }
...@@ -1362,6 +1366,7 @@ int r128_cce_swap( DRM_IOCTL_ARGS ) ...@@ -1362,6 +1366,7 @@ int r128_cce_swap( DRM_IOCTL_ARGS )
dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT | dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
R128_UPLOAD_MASKS); R128_UPLOAD_MASKS);
COMMIT_RING();
return 0; return 0;
} }
...@@ -1421,6 +1426,7 @@ int r128_cce_vertex( DRM_IOCTL_ARGS ) ...@@ -1421,6 +1426,7 @@ int r128_cce_vertex( DRM_IOCTL_ARGS )
r128_cce_dispatch_vertex( dev, buf ); r128_cce_dispatch_vertex( dev, buf );
COMMIT_RING();
return 0; return 0;
} }
...@@ -1492,6 +1498,7 @@ int r128_cce_indices( DRM_IOCTL_ARGS ) ...@@ -1492,6 +1498,7 @@ int r128_cce_indices( DRM_IOCTL_ARGS )
r128_cce_dispatch_indices( dev, buf, elts.start, elts.end, count ); r128_cce_dispatch_indices( dev, buf, elts.start, elts.end, count );
COMMIT_RING();
return 0; return 0;
} }
...@@ -1501,6 +1508,7 @@ int r128_cce_blit( DRM_IOCTL_ARGS ) ...@@ -1501,6 +1508,7 @@ int r128_cce_blit( DRM_IOCTL_ARGS )
drm_device_dma_t *dma = dev->dma; drm_device_dma_t *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_blit_t blit; drm_r128_blit_t blit;
int ret;
LOCK_TEST_WITH_RETURN( dev, filp ); LOCK_TEST_WITH_RETURN( dev, filp );
...@@ -1518,7 +1526,10 @@ int r128_cce_blit( DRM_IOCTL_ARGS ) ...@@ -1518,7 +1526,10 @@ int r128_cce_blit( DRM_IOCTL_ARGS )
RING_SPACE_TEST_WITH_RETURN( dev_priv ); RING_SPACE_TEST_WITH_RETURN( dev_priv );
VB_AGE_TEST_WITH_RETURN( dev_priv ); VB_AGE_TEST_WITH_RETURN( dev_priv );
return r128_cce_dispatch_blit( filp, dev, &blit ); ret = r128_cce_dispatch_blit( filp, dev, &blit );
COMMIT_RING();
return ret;
} }
int r128_cce_depth( DRM_IOCTL_ARGS ) int r128_cce_depth( DRM_IOCTL_ARGS )
...@@ -1526,6 +1537,7 @@ int r128_cce_depth( DRM_IOCTL_ARGS ) ...@@ -1526,6 +1537,7 @@ int r128_cce_depth( DRM_IOCTL_ARGS )
DRM_DEVICE; DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_depth_t depth; drm_r128_depth_t depth;
int ret;
LOCK_TEST_WITH_RETURN( dev, filp ); LOCK_TEST_WITH_RETURN( dev, filp );
...@@ -1534,18 +1546,20 @@ int r128_cce_depth( DRM_IOCTL_ARGS ) ...@@ -1534,18 +1546,20 @@ int r128_cce_depth( DRM_IOCTL_ARGS )
RING_SPACE_TEST_WITH_RETURN( dev_priv ); RING_SPACE_TEST_WITH_RETURN( dev_priv );
ret = DRM_ERR(EINVAL);
switch ( depth.func ) { switch ( depth.func ) {
case R128_WRITE_SPAN: case R128_WRITE_SPAN:
return r128_cce_dispatch_write_span( dev, &depth ); ret = r128_cce_dispatch_write_span( dev, &depth );
case R128_WRITE_PIXELS: case R128_WRITE_PIXELS:
return r128_cce_dispatch_write_pixels( dev, &depth ); ret = r128_cce_dispatch_write_pixels( dev, &depth );
case R128_READ_SPAN: case R128_READ_SPAN:
return r128_cce_dispatch_read_span( dev, &depth ); ret = r128_cce_dispatch_read_span( dev, &depth );
case R128_READ_PIXELS: case R128_READ_PIXELS:
return r128_cce_dispatch_read_pixels( dev, &depth ); ret = r128_cce_dispatch_read_pixels( dev, &depth );
} }
return DRM_ERR(EINVAL); COMMIT_RING();
return ret;
} }
int r128_cce_stipple( DRM_IOCTL_ARGS ) int r128_cce_stipple( DRM_IOCTL_ARGS )
...@@ -1568,6 +1582,7 @@ int r128_cce_stipple( DRM_IOCTL_ARGS ) ...@@ -1568,6 +1582,7 @@ int r128_cce_stipple( DRM_IOCTL_ARGS )
r128_cce_dispatch_stipple( dev, mask ); r128_cce_dispatch_stipple( dev, mask );
COMMIT_RING();
return 0; return 0;
} }
...@@ -1643,6 +1658,7 @@ int r128_cce_indirect( DRM_IOCTL_ARGS ) ...@@ -1643,6 +1658,7 @@ int r128_cce_indirect( DRM_IOCTL_ARGS )
*/ */
r128_cce_dispatch_indirect( dev, buf, indirect.start, indirect.end ); r128_cce_dispatch_indirect( dev, buf, indirect.start, indirect.end );
COMMIT_RING();
return 0; return 0;
} }
......
...@@ -51,7 +51,7 @@ ...@@ -51,7 +51,7 @@
#define DRIVER_DATE "20020828" #define DRIVER_DATE "20020828"
#define DRIVER_MAJOR 1 #define DRIVER_MAJOR 1
#define DRIVER_MINOR 9 #define DRIVER_MINOR 10
#define DRIVER_PATCHLEVEL 0 #define DRIVER_PATCHLEVEL 0
/* Interface history: /* Interface history:
...@@ -81,6 +81,9 @@ ...@@ -81,6 +81,9 @@
* Add 'GET' queries for starting additional clients on different VT's. * Add 'GET' queries for starting additional clients on different VT's.
* 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl. * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl.
* Add texture rectangle support for r100. * Add texture rectangle support for r100.
* 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which
* clients use to tell the DRM where they think the framebuffer is
* located in the card's address space
*/ */
#define DRIVER_IOCTLS \ #define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \
...@@ -106,10 +109,21 @@ ...@@ -106,10 +109,21 @@
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_ALLOC)] = { radeon_mem_alloc, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_ALLOC)] = { radeon_mem_alloc, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_FREE)] = { radeon_mem_free, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FREE)] = { radeon_mem_free, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_INIT_HEAP)] = { radeon_mem_init_heap, 1, 1 }, \ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INIT_HEAP)] = { radeon_mem_init_heap, 1, 1 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_EMIT)] = { radeon_irq_emit, 1, 0 }, \ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_EMIT)] = { radeon_irq_emit, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_SETPARAM)] = { radeon_cp_setparam, 1, 0 }, \
#define DRIVER_FILE_FIELDS \
int64_t radeon_fb_delta; \
#define DRIVER_OPEN_HELPER( filp_priv, dev ) \
do { \
drm_radeon_private_t *dev_priv = dev->dev_private; \
if ( dev_priv ) \
filp_priv->radeon_fb_delta = dev_priv->fb_location; \
else \
filp_priv->radeon_fb_delta = 0; \
} while( 0 )
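
The radeon_fb_delta field initialised above is consumed by the new SETPARAM path; that handler appears to live in the part of this commit whose diff is collapsed, so the following is only a plausible sketch, with the function name and the direction of the subtraction assumed rather than read from the source:

/* Plausible sketch, not the actual radeon_cp_setparam body; assumes the
 * usual drmP.h / radeon_drv.h context. */
static int radeon_set_fb_location_example( drm_device_t *dev,
					    drm_file_t *filp_priv,
					    int64_t value )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if ( !dev_priv )
		return DRM_ERR(EINVAL);

	/* Record how far this client's idea of the framebuffer base is from
	 * where the kernel actually placed it, so client-supplied offsets can
	 * be rebased during command checking. */
	filp_priv->radeon_fb_delta = dev_priv->fb_location - value;
	return 0;
}
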
/* When a client dies: /* When a client dies:
* - Check for and clean up flipped page state * - Check for and clean up flipped page state
...@@ -142,7 +156,7 @@ do { \ ...@@ -142,7 +156,7 @@ do { \
/* DMA customization: /* DMA customization:
*/ */
#define __HAVE_DMA 1 #define __HAVE_DMA 1
#define __HAVE_DMA_IRQ 1 #define __HAVE_IRQ 1
#define __HAVE_VBL_IRQ 1 #define __HAVE_VBL_IRQ 1
#define __HAVE_SHARED_IRQ 1 #define __HAVE_SHARED_IRQ 1
......
...@@ -855,7 +855,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev, ...@@ -855,7 +855,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
/* Initialize the memory controller */ /* Initialize the memory controller */
RADEON_WRITE( RADEON_MC_FB_LOCATION, RADEON_WRITE( RADEON_MC_FB_LOCATION,
(dev_priv->gart_vm_start - 1) & 0xffff0000 ); ( ( dev_priv->gart_vm_start - 1 ) & 0xffff0000 )
| ( dev_priv->fb_location >> 16 ) );
#if __REALLY_HAVE_AGP #if __REALLY_HAVE_AGP
if ( !dev_priv->is_pci ) { if ( !dev_priv->is_pci ) {
...@@ -1071,13 +1072,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init ) ...@@ -1071,13 +1072,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
dev_priv->depth_offset = init->depth_offset; dev_priv->depth_offset = init->depth_offset;
dev_priv->depth_pitch = init->depth_pitch; dev_priv->depth_pitch = init->depth_pitch;
dev_priv->front_pitch_offset = (((dev_priv->front_pitch/64) << 22) |
(dev_priv->front_offset >> 10));
dev_priv->back_pitch_offset = (((dev_priv->back_pitch/64) << 22) |
(dev_priv->back_offset >> 10));
dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch/64) << 22) |
(dev_priv->depth_offset >> 10));
/* Hardware state for depth clears. Remove this if/when we no /* Hardware state for depth clears. Remove this if/when we no
* longer clear the depth buffer with a 3D rectangle. Hard-code * longer clear the depth buffer with a 3D rectangle. Hard-code
* all values to prevent unwanted 3D state from slipping through * all values to prevent unwanted 3D state from slipping through
...@@ -1124,13 +1118,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init ) ...@@ -1124,13 +1118,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
if(!dev_priv->fb) {
DRM_ERROR("could not find framebuffer!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset ); DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
if(!dev_priv->mmio) { if(!dev_priv->mmio) {
DRM_ERROR("could not find mmio region!\n"); DRM_ERROR("could not find mmio region!\n");
...@@ -1204,9 +1191,26 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init ) ...@@ -1204,9 +1191,26 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
dev_priv->buffers->handle ); dev_priv->buffers->handle );
} }
dev_priv->fb_location = ( RADEON_READ( RADEON_MC_FB_LOCATION )
& 0xffff ) << 16;
dev_priv->front_pitch_offset = (((dev_priv->front_pitch/64) << 22) |
( ( dev_priv->front_offset
+ dev_priv->fb_location ) >> 10 ) );
dev_priv->back_pitch_offset = (((dev_priv->back_pitch/64) << 22) |
( ( dev_priv->back_offset
+ dev_priv->fb_location ) >> 10 ) );
dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch/64) << 22) |
( ( dev_priv->depth_offset
+ dev_priv->fb_location ) >> 10 ) );
dev_priv->gart_size = init->gart_size; dev_priv->gart_size = init->gart_size;
dev_priv->gart_vm_start = RADEON_READ( RADEON_CONFIG_APER_SIZE ); dev_priv->gart_vm_start = dev_priv->fb_location
+ RADEON_READ( RADEON_CONFIG_APER_SIZE );
#if __REALLY_HAVE_AGP #if __REALLY_HAVE_AGP
if ( !dev_priv->is_pci ) if ( !dev_priv->is_pci )
dev_priv->gart_buffers_offset = (dev_priv->buffers->offset dev_priv->gart_buffers_offset = (dev_priv->buffers->offset
...@@ -1271,12 +1275,12 @@ int radeon_do_cleanup_cp( drm_device_t *dev ) ...@@ -1271,12 +1275,12 @@ int radeon_do_cleanup_cp( drm_device_t *dev )
{ {
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
#if _HAVE_DMA_IRQ #if __HAVE_IRQ
/* Make sure interrupts are disabled here because the uninstall ioctl /* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private * may not have been called from userspace and after dev_private
* is freed, it's too late. * is freed, it's too late.
*/ */
if ( dev->irq ) DRM(irq_uninstall)(dev); if ( dev->irq_enabled ) DRM(irq_uninstall)(dev);
#endif #endif
if ( dev->dev_private ) { if ( dev->dev_private ) {
......
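
A worked example of the rebased pitch/offset packing added in radeon_do_init_cp above, with all numbers assumed purely for illustration:

/* Assume fb_location = 0xe0000000 (RADEON_MC_FB_LOCATION low word << 16),
 * front_offset = 0x00080000 and front_pitch = 1536.  Then:
 *   front_pitch_offset = ((1536/64) << 22) | ((0x00080000 + 0xe0000000) >> 10)
 *                      = (24 << 22)        | (0xe0080000 >> 10)
 *                      = 0x06000000        | 0x00380200
 *                      = 0x06380200
 * so the offset field the card sees is now relative to the real framebuffer
 * base instead of to zero.
 */
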
...@@ -226,6 +226,13 @@ typedef union { ...@@ -226,6 +226,13 @@ typedef union {
#define RADEON_MAX_TEXTURE_LEVELS 12 #define RADEON_MAX_TEXTURE_LEVELS 12
#define RADEON_MAX_TEXTURE_UNITS 3 #define RADEON_MAX_TEXTURE_UNITS 3
/* Blits have strict offset rules. All blit offset must be aligned on
* a 1K-byte boundary.
*/
#define RADEON_OFFSET_SHIFT 10
#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT)
#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1)
#endif /* __RADEON_SAREA_DEFINES__ */ #endif /* __RADEON_SAREA_DEFINES__ */
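
The alignment macros above give the building blocks for validating client-supplied blit offsets; a minimal illustrative check, with the helper name invented here:

/* Hypothetical helper: non-zero when a blit offset obeys the 1 KB rule. */
static int radeon_offset_is_aligned_example( u32 offset )
{
	return ( offset & RADEON_OFFSET_MASK ) == 0;
}
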
typedef struct { typedef struct {
...@@ -365,31 +372,58 @@ typedef struct { ...@@ -365,31 +372,58 @@ typedef struct {
/* Radeon specific ioctls /* Radeon specific ioctls
* The device specific ioctl range is 0x40 to 0x79. * The device specific ioctl range is 0x40 to 0x79.
*/ */
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t) #define DRM_RADEON_CP_INIT 0x00
#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41) #define DRM_RADEON_CP_START 0x01
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t) #define DRM_RADEON_CP_STOP 0x02
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43) #define DRM_RADEON_CP_RESET 0x03
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44) #define DRM_RADEON_CP_IDLE 0x04
#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45) #define DRM_RADEON_RESET 0x05
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t) #define DRM_RADEON_FULLSCREEN 0x06
#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47) #define DRM_RADEON_SWAP 0x07
#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t) #define DRM_RADEON_CLEAR 0x08
#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t) #define DRM_RADEON_VERTEX 0x09
#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t) #define DRM_RADEON_INDICES 0x0A
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t) #define DRM_RADEON_NOT_USED
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t) #define DRM_RADEON_STIPPLE 0x0C
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t) #define DRM_RADEON_INDIRECT 0x0D
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex2_t) #define DRM_RADEON_TEXTURE 0x0E
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t) #define DRM_RADEON_VERTEX2 0x0F
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t) #define DRM_RADEON_CMDBUF 0x10
#define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52) #define DRM_RADEON_GETPARAM 0x11
#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR( 0x53, drm_radeon_mem_alloc_t) #define DRM_RADEON_FLIP 0x12
#define DRM_IOCTL_RADEON_FREE DRM_IOW( 0x54, drm_radeon_mem_free_t) #define DRM_RADEON_ALLOC 0x13
#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( 0x55, drm_radeon_mem_init_heap_t) #define DRM_RADEON_FREE 0x14
#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR( 0x56, drm_radeon_irq_emit_t) #define DRM_RADEON_INIT_HEAP 0x15
#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( 0x57, drm_radeon_irq_wait_t) #define DRM_RADEON_IRQ_EMIT 0x16
/* added by Charl P. Botha - see radeon_cp.c for details */ #define DRM_RADEON_IRQ_WAIT 0x17
#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO(0x58) #define DRM_RADEON_CP_RESUME 0x18
#define DRM_RADEON_SETPARAM 0x19
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET)
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP)
#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP)
#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
typedef struct drm_radeon_init { typedef struct drm_radeon_init {
enum { enum {
...@@ -502,7 +536,7 @@ typedef struct drm_radeon_tex_image { ...@@ -502,7 +536,7 @@ typedef struct drm_radeon_tex_image {
} drm_radeon_tex_image_t; } drm_radeon_tex_image_t;
typedef struct drm_radeon_texture { typedef struct drm_radeon_texture {
int offset; unsigned int offset;
int pitch; int pitch;
int format; int format;
int width; /* Texture image coordinates */ int width; /* Texture image coordinates */
...@@ -537,10 +571,11 @@ typedef struct drm_radeon_indirect { ...@@ -537,10 +571,11 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_STATUS_HANDLE 8 #define RADEON_PARAM_STATUS_HANDLE 8
#define RADEON_PARAM_SAREA_HANDLE 9 #define RADEON_PARAM_SAREA_HANDLE 9
#define RADEON_PARAM_GART_TEX_HANDLE 10 #define RADEON_PARAM_GART_TEX_HANDLE 10
#define RADEON_PARAM_SCRATCH_OFFSET 11
typedef struct drm_radeon_getparam { typedef struct drm_radeon_getparam {
int param; int param;
int *value; void *value;
} drm_radeon_getparam_t; } drm_radeon_getparam_t;
/* 1.6: Set up a memory manager for regions of shared memory: /* 1.6: Set up a memory manager for regions of shared memory:
...@@ -578,4 +613,16 @@ typedef struct drm_radeon_irq_wait { ...@@ -578,4 +613,16 @@ typedef struct drm_radeon_irq_wait {
} drm_radeon_irq_wait_t; } drm_radeon_irq_wait_t;
/* 1.10: Clients tell the DRM where they think the framebuffer is located in
* the card's address space, via a new generic ioctl to set parameters
*/
typedef struct drm_radeon_setparam {
unsigned int param;
int64_t value;
} drm_radeon_setparam_t;
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
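
A minimal userspace sketch of the new ioctl, assuming an open and authenticated DRM fd; fb_base stands in for whatever base address the client has derived (for example from the PCI aperture):

#include <sys/ioctl.h>
#include "drm.h"	/* include paths depend on the installation */
#include "radeon_drm.h"

int radeon_tell_fb_location_example( int fd, int64_t fb_base )
{
	drm_radeon_setparam_t sp;

	sp.param = RADEON_SETPARAM_FB_LOCATION;
	sp.value = fb_base;	/* where this client believes the framebuffer lives */
	return ioctl( fd, DRM_IOCTL_RADEON_SETPARAM, &sp );
}
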
#endif #endif
...@@ -48,6 +48,7 @@ ...@@ -48,6 +48,7 @@
#include "drm_fops.h" #include "drm_fops.h"
#include "drm_init.h" #include "drm_init.h"
#include "drm_ioctl.h" #include "drm_ioctl.h"
#include "drm_irq.h"
#include "drm_lock.h" #include "drm_lock.h"
#include "drm_memory.h" #include "drm_memory.h"
#include "drm_proc.h" #include "drm_proc.h"
......
...@@ -73,6 +73,8 @@ typedef struct drm_radeon_private { ...@@ -73,6 +73,8 @@ typedef struct drm_radeon_private {
drm_radeon_ring_buffer_t ring; drm_radeon_ring_buffer_t ring;
drm_radeon_sarea_t *sarea_priv; drm_radeon_sarea_t *sarea_priv;
u32 fb_location;
int gart_size; int gart_size;
u32 gart_vm_start; u32 gart_vm_start;
unsigned long gart_buffers_offset; unsigned long gart_buffers_offset;
...@@ -133,7 +135,6 @@ typedef struct drm_radeon_private { ...@@ -133,7 +135,6 @@ typedef struct drm_radeon_private {
unsigned long gart_textures_offset; unsigned long gart_textures_offset;
drm_local_map_t *sarea; drm_local_map_t *sarea;
drm_local_map_t *fb;
drm_local_map_t *mmio; drm_local_map_t *mmio;
drm_local_map_t *cp_ring; drm_local_map_t *cp_ring;
drm_local_map_t *ring_rptr; drm_local_map_t *ring_rptr;
...@@ -184,6 +185,7 @@ extern int radeon_cp_indirect( DRM_IOCTL_ARGS ); ...@@ -184,6 +185,7 @@ extern int radeon_cp_indirect( DRM_IOCTL_ARGS );
extern int radeon_cp_vertex2( DRM_IOCTL_ARGS ); extern int radeon_cp_vertex2( DRM_IOCTL_ARGS );
extern int radeon_cp_cmdbuf( DRM_IOCTL_ARGS ); extern int radeon_cp_cmdbuf( DRM_IOCTL_ARGS );
extern int radeon_cp_getparam( DRM_IOCTL_ARGS ); extern int radeon_cp_getparam( DRM_IOCTL_ARGS );
extern int radeon_cp_setparam( DRM_IOCTL_ARGS );
extern int radeon_cp_flip( DRM_IOCTL_ARGS ); extern int radeon_cp_flip( DRM_IOCTL_ARGS );
extern int radeon_mem_alloc( DRM_IOCTL_ARGS ); extern int radeon_mem_alloc( DRM_IOCTL_ARGS );
...@@ -239,6 +241,7 @@ extern void radeon_do_release(drm_device_t *dev); ...@@ -239,6 +241,7 @@ extern void radeon_do_release(drm_device_t *dev);
#define RADEON_CRTC2_OFFSET 0x0324 #define RADEON_CRTC2_OFFSET 0x0324
#define RADEON_CRTC2_OFFSET_CNTL 0x0328 #define RADEON_CRTC2_OFFSET_CNTL 0x0328
#define RADEON_RB3D_COLOROFFSET 0x1c40
#define RADEON_RB3D_COLORPITCH 0x1c48 #define RADEON_RB3D_COLORPITCH 0x1c48
#define RADEON_DP_GUI_MASTER_CNTL 0x146c #define RADEON_DP_GUI_MASTER_CNTL 0x146c
...@@ -332,6 +335,7 @@ extern void radeon_do_release(drm_device_t *dev); ...@@ -332,6 +335,7 @@ extern void radeon_do_release(drm_device_t *dev);
#define RADEON_PP_MISC 0x1c14 #define RADEON_PP_MISC 0x1c14
#define RADEON_PP_ROT_MATRIX_0 0x1d58 #define RADEON_PP_ROT_MATRIX_0 0x1d58
#define RADEON_PP_TXFILTER_0 0x1c54 #define RADEON_PP_TXFILTER_0 0x1c54
#define RADEON_PP_TXOFFSET_0 0x1c5c
#define RADEON_PP_TXFILTER_1 0x1c6c #define RADEON_PP_TXFILTER_1 0x1c6c
#define RADEON_PP_TXFILTER_2 0x1c84 #define RADEON_PP_TXFILTER_2 0x1c84
......
...@@ -54,7 +54,7 @@ ...@@ -54,7 +54,7 @@
* tied to dma at all, this is just a hangover from dri prehistory. * tied to dma at all, this is just a hangover from dri prehistory.
*/ */
irqreturn_t DRM(dma_service)( DRM_IRQ_ARGS ) irqreturn_t DRM(irq_handler)( DRM_IRQ_ARGS )
{ {
drm_device_t *dev = (drm_device_t *) arg; drm_device_t *dev = (drm_device_t *) arg;
drm_radeon_private_t *dev_priv = drm_radeon_private_t *dev_priv =
......
...@@ -39,4 +39,14 @@ ...@@ -39,4 +39,14 @@
#define __HAVE_MTRR 1 #define __HAVE_MTRR 1
#define __HAVE_CTX_BITMAP 1 #define __HAVE_CTX_BITMAP 1
#define DRIVER_AUTHOR "VA Linux Systems Inc."
#define DRIVER_NAME "tdfx"
#define DRIVER_DESC "3dfx Banshee/Voodoo3+"
#define DRIVER_DATE "20010216"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#endif #endif
...@@ -34,47 +34,6 @@ ...@@ -34,47 +34,6 @@
#include "tdfx.h" #include "tdfx.h"
#include "drmP.h" #include "drmP.h"
#define DRIVER_AUTHOR "VA Linux Systems Inc."
#define DRIVER_NAME "tdfx"
#define DRIVER_DESC "3dfx Banshee/Voodoo3+"
#define DRIVER_DATE "20010216"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#ifndef PCI_VENDOR_ID_3DFX
#define PCI_VENDOR_ID_3DFX 0x121A
#endif
#ifndef PCI_DEVICE_ID_3DFX_VOODOO5
#define PCI_DEVICE_ID_3DFX_VOODOO5 0x0009
#endif
#ifndef PCI_DEVICE_ID_3DFX_VOODOO4
#define PCI_DEVICE_ID_3DFX_VOODOO4 0x0007
#endif
#ifndef PCI_DEVICE_ID_3DFX_VOODOO3_3000 /* Voodoo3 3000 */
#define PCI_DEVICE_ID_3DFX_VOODOO3_3000 0x0005
#endif
#ifndef PCI_DEVICE_ID_3DFX_VOODOO3_2000 /* Voodoo3 2000 */
#define PCI_DEVICE_ID_3DFX_VOODOO3_2000 0x0004
#endif
#ifndef PCI_DEVICE_ID_3DFX_BANSHEE
#define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003
#endif
static drm_pci_list_t DRM(idlist)[] = {
{ PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_BANSHEE },
{ PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_VOODOO3_2000 },
{ PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_VOODOO3_3000 },
{ PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_VOODOO4 },
{ PCI_VENDOR_ID_3DFX, PCI_DEVICE_ID_3DFX_VOODOO5 },
{ 0, 0 }
};
#define DRIVER_CARD_LIST DRM(idlist)
#include "drm_auth.h" #include "drm_auth.h"
#include "drm_bufs.h" #include "drm_bufs.h"
#include "drm_context.h" #include "drm_context.h"
......