Commit 16f09d1d authored by Linus Torvalds, committed by Paul Mackerras

DRI CVS update

 - Pass in the "device" to remapping functions
 - add radeon CP resume ioctl
 - remove warnings and stale DMA debug code
parent b7d25ef4
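
For context, the new CP resume ioctl carries no argument payload (it is defined as DRM_IO(0x58) in radeon_drm.h below), so a resume script only needs to open the DRM device node and issue the ioctl. A minimal userspace sketch, not part of this commit; the device node path and include path are assumptions about the local setup:

/* Hypothetical example: issue the new radeon CP resume ioctl after a
 * suspend/resume cycle. Assumes radeon_drm.h is on the include path and
 * that the DRM node is /dev/dri/card0 on this system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "radeon_drm.h"	/* defines DRM_IOCTL_RADEON_CP_RESUME as DRM_IO(0x58) */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* node name may vary */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, DRM_IOCTL_RADEON_CP_RESUME) < 0)
		perror("DRM_IOCTL_RADEON_CP_RESUME");
	close(fd);
	return 0;
}
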
@@ -225,16 +225,16 @@ static inline struct page * vmalloc_to_page(void * vmalloc_addr)
if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
/* Mapping helper macros */
-#define DRM_IOREMAP(map) \
-(map)->handle = DRM(ioremap)( (map)->offset, (map)->size )
+#define DRM_IOREMAP(map, dev) \
+(map)->handle = DRM(ioremap)( (map)->offset, (map)->size, (dev) )
-#define DRM_IOREMAP_NOCACHE(map) \
-(map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size)
+#define DRM_IOREMAP_NOCACHE(map, dev) \
+(map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size, (dev))
-#define DRM_IOREMAPFREE(map) \
+#define DRM_IOREMAPFREE(map, dev) \
do { \
if ( (map)->handle && (map)->size ) \
-DRM(ioremapfree)( (map)->handle, (map)->size ); \
+DRM(ioremapfree)( (map)->handle, (map)->size, (dev) ); \
} while (0)
#define DRM_FIND_MAP(_map, _o) \
@@ -652,9 +652,9 @@ extern void DRM(free)(void *pt, size_t size, int area);
extern unsigned long DRM(alloc_pages)(int order, int area);
extern void DRM(free_pages)(unsigned long address, int order,
int area);
-extern void *DRM(ioremap)(unsigned long offset, unsigned long size);
-extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size);
-extern void DRM(ioremapfree)(void *pt, unsigned long size);
+extern void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev);
+extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev);
+extern void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev);
#if __REALLY_HAVE_AGP
extern struct agp_memory *DRM(alloc_agp)(int pages, u32 type);
@@ -123,7 +123,7 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
MTRR_TYPE_WRCOMB, 1 );
}
#endif
-map->handle = DRM(ioremap)( map->offset, map->size );
+map->handle = DRM(ioremap)( map->offset, map->size, dev );
break;
case _DRM_SHM:
@@ -245,7 +245,7 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
-DRM(ioremapfree)(map->handle, map->size);
+DRM(ioremapfree)(map->handle, map->size, dev);
break;
case _DRM_SHM:
vfree(map->handle);
@@ -453,7 +453,7 @@ static int DRM(takedown)( drm_device_t *dev )
DRM_DEBUG( "mtrr_del=%d\n", retcode );
}
#endif
-DRM(ioremapfree)( map->handle, map->size );
+DRM(ioremapfree)( map->handle, map->size, dev );
break;
case _DRM_SHM:
vfree(map->handle);
@@ -38,6 +38,151 @@
*/
#define DEBUG_MEMORY 0
#if __REALLY_HAVE_AGP
#include <linux/vmalloc.h>
#ifdef HAVE_PAGE_AGP
#include <asm/agp.h>
#else
# ifdef __powerpc__
# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
# else
# define PAGE_AGP PAGE_KERNEL
# endif
#endif
#include <asm/tlbflush.h>
/*
* Find the drm_map that covers the range [offset, offset+size).
*/
static inline drm_map_t *
drm_lookup_map (unsigned long offset, unsigned long size, drm_device_t *dev)
{
struct list_head *list;
drm_map_list_t *r_list;
drm_map_t *map;
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *) list;
map = r_list->map;
if (!map)
continue;
if (map->offset <= offset && (offset + size) <= (map->offset + map->size))
return map;
}
return NULL;
}
static inline void *
agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
{
unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE;
struct drm_agp_mem *agpmem;
struct page **page_map;
void *addr;
size = PAGE_ALIGN(size);
#ifdef __alpha__
offset -= dev->hose->mem_space->start;
#endif
for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= (offset + size))
break;
if (!agpmem)
return NULL;
/*
* OK, we're mapping AGP space on a chipset/platform on which memory accesses by
* the CPU do not get remapped by the GART. We fix this by using the kernel's
* page-table instead (that's probably faster anyhow...).
*/
/* note: use vmalloc() because num_pages could be large... */
page_map = vmalloc(num_pages * sizeof(struct page *));
if (!page_map)
return NULL;
phys_addr_map = agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
for (i = 0; i < num_pages; ++i)
page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
vfree(page_map);
if (!addr)
return NULL;
flush_tlb_kernel_range((unsigned long) addr, (unsigned long) addr + size);
return addr;
}
static inline unsigned long
drm_follow_page (void *vaddr)
{
pgd_t *pgd = pgd_offset_k((unsigned long) vaddr);
pmd_t *pmd = pmd_offset(pgd, (unsigned long) vaddr);
pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr);
return pte_pfn(*ptep) << PAGE_SHIFT;
}
#endif /* __REALLY_HAVE_AGP */
static inline void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t *dev)
{
#if __REALLY_HAVE_AGP
if (dev->agp && dev->agp->cant_use_aperture) {
drm_map_t *map = drm_lookup_map(offset, size, dev);
if (map && map->type == _DRM_AGP)
return agp_remap(offset, size, dev);
}
#endif
return ioremap(offset, size);
}
static inline void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
drm_device_t *dev)
{
#if __REALLY_HAVE_AGP
if (dev->agp && dev->agp->cant_use_aperture) {
drm_map_t *map = drm_lookup_map(offset, size, dev);
if (map && map->type == _DRM_AGP)
return agp_remap(offset, size, dev);
}
#endif
return ioremap_nocache(offset, size);
}
static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev)
{
#if __REALLY_HAVE_AGP
/*
* This is a bit ugly. It would be much cleaner if the DRM API would use separate
* routines for handling mappings in the AGP space. Hopefully this can be done in
* a future revision of the interface...
*/
if (dev->agp && dev->agp->cant_use_aperture
&& ((unsigned long) pt >= VMALLOC_START && (unsigned long) pt < VMALLOC_END))
{
unsigned long offset;
drm_map_t *map;
offset = drm_follow_page(pt) | ((unsigned long) pt & ~PAGE_MASK);
map = drm_lookup_map(offset, size, dev);
if (map && map->type == _DRM_AGP) {
vunmap(pt);
return;
}
}
#endif
iounmap(pt);
}
#if DEBUG_MEMORY
#include "drm_memory_debug.h"
@@ -118,19 +263,19 @@ void DRM(free_pages)(unsigned long address, int order, int area)
free_pages(address, order);
}
-void *DRM(ioremap)(unsigned long offset, unsigned long size)
+void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
{
-return ioremap(offset, size);
+return drm_ioremap(offset, size, dev);
}
-void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
+void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
{
-return ioremap_nocache(offset, size);
+return drm_ioremap_nocache(offset, size, dev);
}
-void DRM(ioremapfree)(void *pt, unsigned long size)
+void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
{
-iounmap(pt);
+drm_ioremapfree(pt, size, dev);
}
#if __REALLY_HAVE_AGP
@@ -269,7 +269,7 @@ void DRM(free_pages)(unsigned long address, int order, int area)
}
}
-void *DRM(ioremap)(unsigned long offset, unsigned long size)
+void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
{
void *pt;
@@ -279,7 +279,7 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size)
return NULL;
}
-if (!(pt = ioremap(offset, size))) {
+if (!(pt = drm_ioremap(offset, size, dev))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&DRM(mem_lock));
@@ -292,7 +292,7 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size)
return pt;
}
-void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
+void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
{
void *pt;
@@ -302,7 +302,7 @@ void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
return NULL;
}
-if (!(pt = ioremap_nocache(offset, size))) {
+if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&DRM(mem_lock));
@@ -315,7 +315,7 @@ void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
return pt;
}
-void DRM(ioremapfree)(void *pt, unsigned long size)
+void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
{
int alloc_count;
int free_count;
@@ -324,7 +324,7 @@ void DRM(ioremapfree)(void *pt, unsigned long size)
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Attempt to free NULL pointer\n");
else
-iounmap(pt);
+drm_ioremapfree(pt, size, dev);
spin_lock(&DRM(mem_lock));
DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
@@ -378,8 +378,6 @@ static int DRM(clients_info)(char *buf, char **start, off_t offset,
#if DRM_DEBUG_CODE
-#define DRM_VMA_VERBOSE 0
static int DRM(_vma_info)(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
@@ -387,13 +385,6 @@ static int DRM(_vma_info)(char *buf, char **start, off_t offset, int request,
int len = 0;
drm_vma_entry_t *pt;
struct vm_area_struct *vma;
-#if DRM_VMA_VERBOSE
-unsigned long i;
-unsigned long address;
-pgd_t *pgd;
-pmd_t *pmd;
-pte_t *pte;
-#endif
#if defined(__i386__)
unsigned int pgprot;
#endif
@@ -107,12 +107,12 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
* Get the page, inc the use count, and return it
*/
offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
-agpmem->memory->memory[offset] &= dev->agp->page_mask;
page = virt_to_page(__va(agpmem->memory->memory[offset]));
get_page(page);
DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx\n",
baddr, __va(agpmem->memory->memory[offset]), offset);
DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
baddr, __va(agpmem->memory->memory[offset]), offset,
atomic_read(&page->count));
return page;
}
@@ -206,7 +206,7 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
-DRM(ioremapfree)(map->handle, map->size);
+DRM(ioremapfree)(map->handle, map->size, dev);
break;
case _DRM_SHM:
vfree(map->handle);
@@ -380,7 +380,16 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
if ( !priv->authenticated ) return -EACCES;
-if (!VM_OFFSET(vma)) return DRM(mmap_dma)(filp, vma);
+/* We check for "dma". On Apple's UniNorth, it's valid to have
+ * the AGP mapped at physical address 0
+ * --BenH.
+ */
+if (!VM_OFFSET(vma)
+#if __REALLY_HAVE_AGP
+&& (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
+#endif
+)
+return DRM(mmap_dma)(filp, vma);
/* A sequential search of a linked list is
fine here because: 1) there will only be
@@ -406,7 +415,7 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
-vma->vm_flags &= VM_MAYWRITE;
+vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
@@ -420,15 +429,19 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
switch (map->type) {
case _DRM_AGP:
-#if defined(__alpha__)
+#if __REALLY_HAVE_AGP
if (dev->agp->cant_use_aperture) {
/*
-* On Alpha we can't talk to bus dma address from the
-* CPU, so for memory of type DRM_AGP, we'll deal with
-* sorting out the real physical pages and mappings
-* in nopage()
+* On some platforms we can't talk to bus dma address from the CPU, so for
+* memory of type DRM_AGP, we'll deal with sorting out the real physical
+* pages and mappings in nopage()
*/
+#if defined(__powerpc__)
+pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+#endif
vma->vm_ops = &DRM(vm_ops);
break;
}
#endif
/* fall through to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
@@ -439,15 +452,15 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
}
-#elif defined(__ia64__)
-if (map->type != _DRM_AGP)
-vma->vm_page_prot =
-pgprot_writecombine(vma->vm_page_prot);
#elif defined(__powerpc__)
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#endif
vma->vm_flags |= VM_IO; /* not in core dump */
}
+#if defined(__ia64__)
+if (map->type != _DRM_AGP)
+vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+#endif
offset = DRIVER_GET_REG_OFS();
#ifdef __sparc__
if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
@@ -614,7 +614,7 @@ static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
} else {
DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
-DRM_IOREMAP( dev_priv->buffers );
+DRM_IOREMAP( dev_priv->buffers, dev );
buf = dma->buflist[GLINT_DRI_BUF_COUNT];
pgt = buf->address;
@@ -653,7 +653,7 @@ int gamma_do_cleanup_dma( drm_device_t *dev )
drm_gamma_private_t *dev_priv = dev->dev_private;
if ( dev_priv->buffers != NULL )
-DRM_IOREMAPFREE( dev_priv->buffers );
+DRM_IOREMAPFREE( dev_priv->buffers, dev );
DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
DRM_MEM_DRIVER );
@@ -246,7 +246,7 @@ int i810_dma_cleanup(drm_device_t *dev)
if(dev_priv->ring.virtual_start) {
DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
-dev_priv->ring.Size);
+dev_priv->ring.Size, dev);
}
if (dev_priv->hw_status_page) {
pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -263,7 +263,7 @@ int i810_dma_cleanup(drm_device_t *dev)
drm_buf_t *buf = dma->buflist[ i ];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
if ( buf_priv->kernel_virtual && buf->total )
-DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
+DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
}
}
return 0;
@@ -333,7 +333,7 @@ static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
*buf_priv->in_use = I810_BUF_FREE;
buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
-buf->total);
+buf->total, dev);
}
return 0;
}
@@ -386,7 +386,7 @@ static int i810_dma_initialize(drm_device_t *dev,
dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
init->ring_start,
-init->ring_size);
+init->ring_size, dev);
if (dev_priv->ring.virtual_start == NULL) {
dev->dev_private = (void *) dev_priv;
@@ -246,7 +246,7 @@ int i830_dma_cleanup(drm_device_t *dev)
if (dev_priv->ring.virtual_start) {
DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
-dev_priv->ring.Size);
+dev_priv->ring.Size, dev);
}
if (dev_priv->hw_status_page) {
pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -264,7 +264,7 @@ int i830_dma_cleanup(drm_device_t *dev)
drm_buf_t *buf = dma->buflist[ i ];
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
if ( buf_priv->kernel_virtual && buf->total )
-DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
+DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
}
}
return 0;
@@ -340,7 +340,7 @@ static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv)
*buf_priv->in_use = I830_BUF_FREE;
buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
-buf->total);
+buf->total, dev);
}
return 0;
}
@@ -394,7 +394,7 @@ static int i830_dma_initialize(drm_device_t *dev,
dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
init->ring_start,
-init->ring_size);
+init->ring_size, dev);
if (dev_priv->ring.virtual_start == NULL) {
dev->dev_private = (void *) dev_priv;
@@ -554,9 +554,9 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
(drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
init->sarea_priv_offset);
-DRM_IOREMAP( dev_priv->warp );
-DRM_IOREMAP( dev_priv->primary );
-DRM_IOREMAP( dev_priv->buffers );
+DRM_IOREMAP( dev_priv->warp, dev );
+DRM_IOREMAP( dev_priv->primary, dev );
+DRM_IOREMAP( dev_priv->buffers, dev );
if(!dev_priv->warp->handle ||
!dev_priv->primary->handle ||
@@ -651,11 +651,11 @@ int mga_do_cleanup_dma( drm_device_t *dev )
drm_mga_private_t *dev_priv = dev->dev_private;
if ( dev_priv->warp != NULL )
-DRM_IOREMAPFREE( dev_priv->warp );
+DRM_IOREMAPFREE( dev_priv->warp, dev );
if ( dev_priv->primary != NULL )
-DRM_IOREMAPFREE( dev_priv->primary );
+DRM_IOREMAPFREE( dev_priv->primary, dev );
if ( dev_priv->buffers != NULL )
-DRM_IOREMAPFREE( dev_priv->buffers );
+DRM_IOREMAPFREE( dev_priv->buffers, dev );
if ( dev_priv->head != NULL ) {
mga_freelist_cleanup( dev );
@@ -226,7 +226,7 @@ do { \
if ( MGA_VERBOSE ) { \
DRM_INFO( "BEGIN_DMA( %d ) in %s\n", \
(n), __FUNCTION__ ); \
DRM_INFO( " space=0x%x req=0x%x\n", \
DRM_INFO( " space=0x%x req=0x%Zx\n", \
dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
} \
prim = dev_priv->prim.start; \
@@ -276,7 +276,7 @@ do { \
#define DMA_WRITE( offset, val ) \
do { \
if ( MGA_VERBOSE ) { \
DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04x\n", \
DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \
(u32)(val), write + (offset) * sizeof(u32) ); \
} \
*(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
@@ -350,8 +350,8 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev,
R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR,
entry->busaddr[page_ofs]);
DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n",
entry->busaddr[page_ofs],
DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n",
(unsigned long) entry->busaddr[page_ofs],
entry->handle + tmp_ofs );
}
@@ -539,10 +539,11 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
(drm_r128_sarea_t *)((u8 *)dev_priv->sarea->handle +
init->sarea_priv_offset);
+#if __REALLY_HAVE_AGP
if ( !dev_priv->is_pci ) {
-DRM_IOREMAP( dev_priv->cce_ring );
-DRM_IOREMAP( dev_priv->ring_rptr );
-DRM_IOREMAP( dev_priv->buffers );
+DRM_IOREMAP( dev_priv->cce_ring, dev );
+DRM_IOREMAP( dev_priv->ring_rptr, dev );
+DRM_IOREMAP( dev_priv->buffers, dev );
if(!dev_priv->cce_ring->handle ||
!dev_priv->ring_rptr->handle ||
!dev_priv->buffers->handle) {
@@ -551,7 +552,9 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
r128_do_cleanup_cce( dev );
return DRM_ERR(ENOMEM);
}
-} else {
+} else
+#endif
+{
dev_priv->cce_ring->handle =
(void *)dev_priv->cce_ring->offset;
dev_priv->ring_rptr->handle =
@@ -625,23 +628,22 @@ int r128_do_cleanup_cce( drm_device_t *dev )
if ( dev->dev_private ) {
drm_r128_private_t *dev_priv = dev->dev_private;
-#if __REALLY_HAVE_SG
+#if __REALLY_HAVE_AGP
if ( !dev_priv->is_pci ) {
-#endif
if ( dev_priv->cce_ring != NULL )
-DRM_IOREMAPFREE( dev_priv->cce_ring );
+DRM_IOREMAPFREE( dev_priv->cce_ring, dev );
if ( dev_priv->ring_rptr != NULL )
-DRM_IOREMAPFREE( dev_priv->ring_rptr );
+DRM_IOREMAPFREE( dev_priv->ring_rptr, dev );
if ( dev_priv->buffers != NULL )
-DRM_IOREMAPFREE( dev_priv->buffers );
-#if __REALLY_HAVE_SG
-} else {
+DRM_IOREMAPFREE( dev_priv->buffers, dev );
+} else
+#endif
+{
if (!DRM(ati_pcigart_cleanup)( dev,
dev_priv->phys_pci_gart,
dev_priv->bus_pci_gart ))
DRM_ERROR( "failed to cleanup PCI GART!\n" );
}
-#endif
DRM(free)( dev->dev_private, sizeof(drm_r128_private_t),
DRM_MEM_DRIVER );
@@ -79,6 +79,7 @@
* R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian)
* 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
* Add 'GET' queries for starting additional clients on different VT's.
* Add DRM_IOCTL_RADEON_CP_RESUME ioctl.
*/
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \
@@ -87,6 +88,7 @@
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESUME)] = { radeon_cp_resume, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 }, \
@@ -1151,10 +1151,11 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
(drm_radeon_sarea_t *)((u8 *)dev_priv->sarea->handle +
init->sarea_priv_offset);
+#if __REALLY_HAVE_AGP
if ( !dev_priv->is_pci ) {
-DRM_IOREMAP( dev_priv->cp_ring );
-DRM_IOREMAP( dev_priv->ring_rptr );
-DRM_IOREMAP( dev_priv->buffers );
+DRM_IOREMAP( dev_priv->cp_ring, dev );
+DRM_IOREMAP( dev_priv->ring_rptr, dev );
+DRM_IOREMAP( dev_priv->buffers, dev );
if(!dev_priv->cp_ring->handle ||
!dev_priv->ring_rptr->handle ||
!dev_priv->buffers->handle) {
@@ -1163,7 +1164,9 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
-} else {
+} else
+#endif
+{
dev_priv->cp_ring->handle =
(void *)dev_priv->cp_ring->offset;
dev_priv->ring_rptr->handle =
@@ -1210,7 +1213,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
-#if __REALLY_HAVE_SG
if ( dev_priv->is_pci ) {
if (!DRM(ati_pcigart_init)( dev, &dev_priv->phys_pci_gart,
&dev_priv->bus_pci_gart)) {
@@ -1240,15 +1242,12 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
RADEON_WRITE( RADEON_MC_AGP_LOCATION, 0xffffffc0 ); /* ?? */
RADEON_WRITE( RADEON_AGP_COMMAND, 0 ); /* clear AGP_COMMAND */
} else {
-#endif /* __REALLY_HAVE_SG */
/* Turn off PCI GART
*/
tmp = RADEON_READ( RADEON_AIC_CNTL )
& ~RADEON_PCIGART_TRANSLATE_EN;
RADEON_WRITE( RADEON_AIC_CNTL, tmp );
-#if __REALLY_HAVE_SG
}
-#endif /* __REALLY_HAVE_SG */
radeon_cp_load_microcode( dev_priv );
radeon_cp_init_ring_buffer( dev, dev_priv );
@@ -1277,20 +1276,21 @@ int radeon_do_cleanup_cp( drm_device_t *dev )
if ( dev->dev_private ) {
drm_radeon_private_t *dev_priv = dev->dev_private;
+#if __REALLY_HAVE_AGP
if ( !dev_priv->is_pci ) {
if ( dev_priv->cp_ring != NULL )
-DRM_IOREMAPFREE( dev_priv->cp_ring );
+DRM_IOREMAPFREE( dev_priv->cp_ring, dev );
if ( dev_priv->ring_rptr != NULL )
-DRM_IOREMAPFREE( dev_priv->ring_rptr );
+DRM_IOREMAPFREE( dev_priv->ring_rptr, dev );
if ( dev_priv->buffers != NULL )
-DRM_IOREMAPFREE( dev_priv->buffers );
-} else {
-#if __REALLY_HAVE_SG
+DRM_IOREMAPFREE( dev_priv->buffers, dev );
+} else
+#endif
+{
if (!DRM(ati_pcigart_cleanup)( dev,
dev_priv->phys_pci_gart,
dev_priv->bus_pci_gart ))
DRM_ERROR( "failed to cleanup PCI GART!\n" );
-#endif /* __REALLY_HAVE_SG */
}
DRM(free)( dev->dev_private, sizeof(drm_radeon_private_t),
@@ -1301,6 +1301,176 @@ int radeon_do_cleanup_cp( drm_device_t *dev )
return 0;
}
/* This code will reinit the Radeon CP hardware after a resume from disc.
* AFAIK, it would be very difficult to pickle the state at suspend time, so
* here we make sure that all Radeon hardware initialisation is re-done without
* affecting running applications. This function is called radeon_do_resume_cp()
* as it was derived from radeon_init_cp, where most of the initialisation takes
* place during DRI init.
*
* This patch is NOT to be confused with my and Michel Daenzer's earlier DRI
* reinit work, which de- and re-initialised the complete DRI at every VT
* switch.
*
* Charl P. Botha <http://cpbotha.net>
*/
static int radeon_do_resume_cp( drm_device_t *dev)
{
drm_radeon_private_t *dev_priv;
u32 tmp;
DRM_DEBUG( "\n" );
DRM_DEBUG("Starting radeon_do_resume_cp()\n");
/* get the existing dev_private */
dev_priv = dev->dev_private;
#if !defined(PCIGART_ENABLED)
/* PCI support is not 100% working, so we disable it here.
*/
if ( dev_priv->is_pci ) {
DRM_ERROR( "PCI GART not yet supported for Radeon!\n" );
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
#endif
if ( dev_priv->is_pci && !dev->sg ) {
DRM_ERROR( "PCI GART memory not allocated!\n" );
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
if ( dev_priv->usec_timeout < 1 ||
dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT ) {
DRM_DEBUG( "TIMEOUT problem!\n" );
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
if ( ( dev_priv->cp_mode != RADEON_CSQ_PRIBM_INDDIS ) &&
( dev_priv->cp_mode != RADEON_CSQ_PRIBM_INDBM ) ) {
DRM_DEBUG( "BAD cp_mode (%x)!\n", dev_priv->cp_mode );
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
if(!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
if(!dev_priv->fb) {
DRM_ERROR("could not find framebuffer!\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
if(!dev_priv->mmio) {
DRM_ERROR("could not find mmio region!\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
if(!dev_priv->cp_ring) {
DRM_ERROR("could not find cp ring region!\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
if(!dev_priv->ring_rptr) {
DRM_ERROR("could not find ring read pointer!\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
if(!dev_priv->buffers) {
DRM_ERROR("could not find dma buffer region!\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
if ( !dev_priv->is_pci ) {
if(!dev_priv->agp_textures) {
DRM_ERROR("could not find agp texture region!\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
}
if ( !dev_priv->is_pci ) {
if(!dev_priv->cp_ring->handle ||
!dev_priv->ring_rptr->handle ||
!dev_priv->buffers->handle) {
DRM_ERROR("could not find ioremap agp regions!\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
} else {
DRM_DEBUG( "dev_priv->cp_ring->handle %p\n",
dev_priv->cp_ring->handle );
DRM_DEBUG( "dev_priv->ring_rptr->handle %p\n",
dev_priv->ring_rptr->handle );
DRM_DEBUG( "dev_priv->buffers->handle %p\n",
dev_priv->buffers->handle );
}
DRM_DEBUG( "dev_priv->agp_size %d\n",
dev_priv->agp_size );
DRM_DEBUG( "dev_priv->agp_vm_start 0x%x\n",
dev_priv->agp_vm_start );
DRM_DEBUG( "dev_priv->agp_buffers_offset 0x%lx\n",
dev_priv->agp_buffers_offset );
#if __REALLY_HAVE_AGP
if ( !dev_priv->is_pci ) {
/* Turn off PCI GART
*/
tmp = RADEON_READ( RADEON_AIC_CNTL )
& ~RADEON_PCIGART_TRANSLATE_EN;
RADEON_WRITE( RADEON_AIC_CNTL, tmp );
} else
#endif
{
/* I'm not so sure about this ati_pcigart_init at resume-time... */
if (!DRM(ati_pcigart_init)( dev, &dev_priv->phys_pci_gart,
&dev_priv->bus_pci_gart)) {
DRM_ERROR( "failed to init PCI GART!\n" );
radeon_do_cleanup_cp(dev);
return DRM_ERR(ENOMEM);
}
tmp = RADEON_READ( RADEON_AIC_CNTL )
| RADEON_PCIGART_TRANSLATE_EN;
RADEON_WRITE( RADEON_AIC_CNTL, tmp );
/* set PCI GART page-table base address
*/
RADEON_WRITE( RADEON_AIC_PT_BASE, dev_priv->bus_pci_gart );
/* set address range for PCI address translate
*/
RADEON_WRITE( RADEON_AIC_LO_ADDR, dev_priv->agp_vm_start );
RADEON_WRITE( RADEON_AIC_HI_ADDR, dev_priv->agp_vm_start
+ dev_priv->agp_size - 1);
/* Turn off AGP aperture -- is this required for PCIGART?
*/
RADEON_WRITE( RADEON_MC_AGP_LOCATION, 0xffffffc0 ); /* ?? */
RADEON_WRITE( RADEON_AGP_COMMAND, 0 ); /* clear AGP_COMMAND */
}
radeon_cp_load_microcode( dev_priv );
radeon_cp_init_ring_buffer( dev, dev_priv );
radeon_do_engine_reset( dev );
return 0;
}
int radeon_cp_init( DRM_IOCTL_ARGS )
{
DRM_DEVICE;
@@ -1456,6 +1626,16 @@ int radeon_cp_idle( DRM_IOCTL_ARGS )
return radeon_do_cp_idle( dev_priv );
}
/* Added by Charl P. Botha to call radeon_do_resume_cp().
*/
int radeon_cp_resume( DRM_IOCTL_ARGS )
{
DRM_DEVICE;
return radeon_do_resume_cp(dev);
}
int radeon_engine_reset( DRM_IOCTL_ARGS )
{
DRM_DEVICE;
@@ -385,6 +385,8 @@ typedef struct {
#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( 0x55, drm_radeon_mem_init_heap_t)
#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR( 0x56, drm_radeon_irq_emit_t)
#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( 0x57, drm_radeon_irq_wait_t)
/* added by Charl P. Botha - see radeon_cp.c for details */
#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO(0x58)
typedef struct drm_radeon_init {
enum {
@@ -159,6 +159,7 @@ extern int radeon_cp_start( DRM_IOCTL_ARGS );
extern int radeon_cp_stop( DRM_IOCTL_ARGS );
extern int radeon_cp_reset( DRM_IOCTL_ARGS );
extern int radeon_cp_idle( DRM_IOCTL_ARGS );
extern int radeon_cp_resume( DRM_IOCTL_ARGS );
extern int radeon_engine_reset( DRM_IOCTL_ARGS );
extern int radeon_fullscreen( DRM_IOCTL_ARGS );
extern int radeon_cp_buffers( DRM_IOCTL_ARGS );