Commit bc4ff576 authored by Linus Torvalds

Merge with DRI CVS tree: remove stale old context switching code and
DMA histogramming.

Be more careful about DMA page-list allocations, and remove old and
broken (not SMP-safe, and unused) DRM read(), write() and poll()
support.
parent ea1499aa
......@@ -39,7 +39,7 @@ config DRM_R128
config DRM_RADEON
tristate "ATI Radeon"
depends on DRM && AGP
depends on DRM
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
......@@ -56,8 +56,14 @@ config DRM_I810
for this driver to work.
config DRM_I830
tristate "Intel 830M"
tristate "Intel 830M, 845G, 852GM, 855GM, 865G"
depends on DRM && AGP
help
Choose this option if you have a system that has Intel 830M, 845G,
852GM, 855GM or 865G integrated graphics. If M is selected, the
module will be called i830. AGP support is required for this driver
to work.
config DRM_MGA
tristate "Matrox g200/g400"
......
......@@ -95,9 +95,6 @@
#ifndef __HAVE_DMA_FREELIST
#define __HAVE_DMA_FREELIST 0
#endif
#ifndef __HAVE_DMA_HISTOGRAM
#define __HAVE_DMA_HISTOGRAM 0
#endif
#define __REALLY_HAVE_AGP (__HAVE_AGP && (defined(CONFIG_AGP) || \
defined(CONFIG_AGP_MODULE)))
......@@ -119,7 +116,6 @@
#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */
#define DRM_FLAG_DEBUG 0x01
#define DRM_FLAG_NOCTX 0x02
#define DRM_MEM_DMA 0
#define DRM_MEM_SAREA 1
......@@ -339,38 +335,11 @@ typedef struct drm_buf {
DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */
#if DRM_DMA_HISTOGRAM
cycles_t time_queued; /* Queued to kernel DMA queue */
cycles_t time_dispatched; /* Dispatched to hardware */
cycles_t time_completed; /* Completed by hardware */
cycles_t time_freed; /* Back on freelist */
#endif
int dev_priv_size; /* Size of buffer private storage */
void *dev_private; /* Per-buffer private storage */
} drm_buf_t;
#if DRM_DMA_HISTOGRAM
#define DRM_DMA_HISTOGRAM_SLOTS 9
#define DRM_DMA_HISTOGRAM_INITIAL 10
#define DRM_DMA_HISTOGRAM_NEXT(current) ((current)*10)
typedef struct drm_histogram {
atomic_t total;
atomic_t queued_to_dispatched[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t dispatched_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t completed_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t queued_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t queued_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t dma[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t schedule[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t ctx[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t lacq[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t lhld[DRM_DMA_HISTOGRAM_SLOTS];
} drm_histogram_t;
#endif
/* bufs is one longer than it has to be */
typedef struct drm_waitlist {
......@@ -422,6 +391,7 @@ typedef struct drm_file {
struct drm_file *prev;
struct drm_device *dev;
int remove_auth_on_close;
unsigned long lock_count;
} drm_file_t;
......@@ -451,21 +421,6 @@ typedef struct drm_lock_data {
} drm_lock_data_t;
typedef struct drm_device_dma {
#if 0
/* Performance Counters */
atomic_t total_prio; /* Total DRM_DMA_PRIORITY */
atomic_t total_bytes; /* Total bytes DMA'd */
atomic_t total_dmas; /* Total DMA buffers dispatched */
atomic_t total_missed_dma; /* Missed drm_do_dma */
atomic_t total_missed_lock; /* Missed lock in drm_do_dma */
atomic_t total_missed_free; /* Missed drm_free_this_buffer */
atomic_t total_missed_sched;/* Missed drm_dma_schedule */
atomic_t total_tried; /* Tried next_buffer */
atomic_t total_hit; /* Sent next_buffer */
atomic_t total_lost; /* Lost interrupt */
#endif
drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
int buf_count;
......@@ -573,7 +528,6 @@ typedef struct drm_device {
/* Memory management */
drm_map_list_t *maplist; /* Linked list of regions */
int map_count; /* Number of mappable regions */
drm_map_t **context_sareas;
int max_context;
......@@ -608,9 +562,6 @@ typedef struct drm_device {
#endif
cycles_t ctx_start;
cycles_t lck_start;
#if __HAVE_DMA_HISTOGRAM
drm_histogram_t histo;
#endif
/* Callback to X server for context switch
and for heavy-handed reset. */
......@@ -666,13 +617,7 @@ extern int DRM(unlock)(struct inode *inode, struct file *filp,
extern int DRM(open_helper)(struct inode *inode, struct file *filp,
drm_device_t *dev);
extern int DRM(flush)(struct file *filp);
extern int DRM(release)(struct inode *inode, struct file *filp);
extern int DRM(fasync)(int fd, struct file *filp, int on);
extern ssize_t DRM(read)(struct file *filp, char *buf, size_t count,
loff_t *off);
extern int DRM(write_string)(drm_device_t *dev, const char *s);
extern unsigned int DRM(poll)(struct file *filp,
struct poll_table_struct *wait);
/* Mapping support (drm_vm.h) */
extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
......@@ -693,6 +638,8 @@ extern void DRM(vm_shm_close)(struct vm_area_struct *vma);
extern int DRM(mmap_dma)(struct file *filp,
struct vm_area_struct *vma);
extern int DRM(mmap)(struct file *filp, struct vm_area_struct *vma);
extern unsigned int DRM(poll)(struct file *filp, struct poll_table_struct *wait);
extern ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off);
/* Memory management support (drm_memory.h) */
extern void DRM(mem_init)(void);
......@@ -701,8 +648,6 @@ extern int DRM(mem_info)(char *buf, char **start, off_t offset,
extern void *DRM(alloc)(size_t size, int area);
extern void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size,
int area);
extern char *DRM(strdup)(const char *s, int area);
extern void DRM(strfree)(const char *s, int area);
extern void DRM(free)(void *pt, size_t size, int area);
extern unsigned long DRM(alloc_pages)(int order, int area);
extern void DRM(free_pages)(unsigned long address, int order,
......@@ -777,12 +722,11 @@ extern int DRM(getmagic)(struct inode *inode, struct file *filp,
extern int DRM(authmagic)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Placeholder for ioctls past */
extern int DRM(noop)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Locking IOCTL support (drm_lock.h) */
extern int DRM(block)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(unblock)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(lock_take)(__volatile__ unsigned int *lock,
unsigned int context);
extern int DRM(lock_transfer)(drm_device_t *dev,
......@@ -822,15 +766,6 @@ extern int DRM(dma_setup)(drm_device_t *dev);
extern void DRM(dma_takedown)(drm_device_t *dev);
extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf);
extern void DRM(reclaim_buffers)( struct file *filp );
#if __HAVE_OLD_DMA
/* GH: This is a dirty hack for now...
*/
extern void DRM(clear_next_buffer)(drm_device_t *dev);
extern int DRM(select_queue)(drm_device_t *dev,
void (*wrapper)(unsigned long));
extern int DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma);
extern int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma);
#endif
#if __HAVE_DMA_IRQ
extern int DRM(control)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
......@@ -850,26 +785,8 @@ extern void DRM(vbl_send_signals)( drm_device_t *dev );
#if __HAVE_DMA_IRQ_BH
extern void DRM(dma_immediate_bh)( void *dev );
#endif
#endif
#if DRM_DMA_HISTOGRAM
extern int DRM(histogram_slot)(unsigned long count);
extern void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf);
#endif
/* Buffer list support (drm_lists.h) */
#if __HAVE_DMA_WAITLIST
extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count);
extern int DRM(waitlist_destroy)(drm_waitlist_t *bl);
extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl);
#endif
#if __HAVE_DMA_FREELIST
extern int DRM(freelist_create)(drm_freelist_t *bl, int count);
extern int DRM(freelist_destroy)(drm_freelist_t *bl);
extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
drm_buf_t *buf);
extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
#endif
#endif /* __HAVE_DMA */
#if __REALLY_HAVE_AGP
......
......@@ -146,7 +146,7 @@ int DRM(agp_alloc)(struct inode *inode, struct file *filp,
return -ENOMEM;
}
entry->handle = (unsigned long)memory->memory;
entry->handle = (unsigned long)memory->key;
entry->memory = memory;
entry->bound = 0;
entry->pages = pages;
......@@ -186,6 +186,7 @@ int DRM(agp_unbind)(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_agp_binding_t request;
drm_agp_mem_t *entry;
int ret;
if (!dev->agp || !dev->agp->acquired) return -EINVAL;
if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request)))
......@@ -193,7 +194,10 @@ int DRM(agp_unbind)(struct inode *inode, struct file *filp,
if (!(entry = DRM(agp_lookup_entry)(dev, request.handle)))
return -EINVAL;
if (!entry->bound) return -EINVAL;
return DRM(unbind_agp)(entry->memory);
ret = DRM(unbind_agp)(entry->memory);
if (ret == 0)
entry->bound = 0;
return ret;
}
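
The unbind fix above keeps the cached bound flag honest: it is cleared only
when DRM(unbind_agp) actually succeeded, so a failed unbind no longer leaves
an entry marked unbound while the hardware still has it bound. A minimal
sketch of the idiom (names hypothetical):

    struct agp_entry { void *memory; int bound; };

    extern int hw_unbind(void *mem);        /* stand-in for the real unbind call */

    int unbind_entry(struct agp_entry *e)
    {
            int ret = hw_unbind(e->memory);

            if (ret == 0)
                    e->bound = 0;           /* cached flag mirrors hardware state */
            return ret;                     /* caller still sees the real error */
    }
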
int DRM(agp_bind)(struct inode *inode, struct file *filp,
......
......@@ -128,7 +128,7 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
case _DRM_SHM:
map->handle = vmalloc_32(map->size);
DRM_DEBUG( "%ld %d %p\n",
DRM_DEBUG( "%lu %d %p\n",
map->size, DRM(order)( map->size ), map->handle );
if ( !map->handle ) {
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
......@@ -269,10 +269,12 @@ static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
if (entry->seg_count) {
for (i = 0; i < entry->seg_count; i++) {
if (entry->seglist[i]) {
DRM(free_pages)(entry->seglist[i],
entry->page_order,
DRM_MEM_DMA);
}
}
DRM(free)(entry->seglist,
entry->seg_count *
sizeof(*entry->seglist),
......@@ -281,9 +283,9 @@ static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
entry->seg_count = 0;
}
if(entry->buf_count) {
for(i = 0; i < entry->buf_count; i++) {
if(entry->buflist[i].dev_private) {
if (entry->buf_count) {
for (i = 0; i < entry->buf_count; i++) {
if (entry->buflist[i].dev_private) {
DRM(free)(entry->buflist[i].dev_private,
entry->buflist[i].dev_priv_size,
DRM_MEM_BUFS);
......@@ -345,7 +347,7 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
DRM_DEBUG( "count: %d\n", count );
DRM_DEBUG( "order: %d\n", order );
DRM_DEBUG( "size: %d\n", size );
DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
DRM_DEBUG( "alignment: %d\n", alignment );
DRM_DEBUG( "page_order: %d\n", page_order );
DRM_DEBUG( "total: %d\n", total );
......@@ -412,15 +414,12 @@ int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
DRM(cleanup_buf_error)(entry);
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
memset( buf->dev_private, 0, buf->dev_priv_size );
#if __HAVE_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
#endif
DRM_DEBUG( "buffer %d @ %p\n",
entry->buf_count, buf->address );
......@@ -565,12 +564,13 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
}
memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
temp_pagelist = DRM(realloc)( dma->pagelist,
dma->page_count * sizeof(*dma->pagelist),
(dma->page_count + (count << page_order))
/* Keep the original pagelist until we know all the allocations
* have succeeded
*/
temp_pagelist = DRM(alloc)( (dma->page_count + (count << page_order))
* sizeof(*dma->pagelist),
DRM_MEM_PAGES );
if(!temp_pagelist) {
if (!temp_pagelist) {
DRM(free)( entry->buflist,
count * sizeof(*entry->buflist),
DRM_MEM_BUFS );
......@@ -581,8 +581,9 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
dma->pagelist = temp_pagelist;
memcpy(temp_pagelist,
dma->pagelist,
dma->page_count * sizeof(*dma->pagelist));
DRM_DEBUG( "pagelist: %d entries\n",
dma->page_count + (count << page_order) );
......@@ -593,13 +594,25 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
while ( entry->buf_count < count ) {
page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
if ( !page ) break;
if ( !page ) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
entry->seg_count = count;
DRM(cleanup_buf_error)(entry);
DRM(free)( temp_pagelist,
(dma->page_count + (count << page_order))
* sizeof(*dma->pagelist),
DRM_MEM_PAGES );
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
entry->seglist[entry->seg_count++] = page;
for ( i = 0 ; i < (1 << page_order) ; i++ ) {
DRM_DEBUG( "page %d @ 0x%08lx\n",
dma->page_count + page_count,
page + PAGE_SIZE * i );
dma->pagelist[dma->page_count + page_count++]
temp_pagelist[dma->page_count + page_count++]
= page + PAGE_SIZE * i;
}
for ( offset = 0 ;
......@@ -617,12 +630,25 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
buf->pending = 0;
init_waitqueue_head( &buf->dma_wait );
buf->filp = 0;
#if __HAVE_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
#endif
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
DRM_MEM_BUFS );
if(!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
entry->seg_count = count;
DRM(cleanup_buf_error)(entry);
DRM(free)( temp_pagelist,
(dma->page_count + (count << page_order))
* sizeof(*dma->pagelist),
DRM_MEM_PAGES );
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
}
memset( buf->dev_private, 0, buf->dev_priv_size );
DRM_DEBUG( "buffer %d @ %p\n",
entry->buf_count, buf->address );
}
......@@ -634,9 +660,13 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS );
if(!temp_buflist) {
if (!temp_buflist) {
/* Free the entry because it isn't valid */
DRM(cleanup_buf_error)(entry);
DRM(free)( temp_pagelist,
(dma->page_count + (count << page_order))
* sizeof(*dma->pagelist),
DRM_MEM_PAGES );
up( &dev->struct_sem );
atomic_dec( &dev->buf_alloc );
return -ENOMEM;
......@@ -647,6 +677,14 @@ int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
/* No allocations failed, so now we can replace the original pagelist
 * with the new one.
 */
DRM(free)(dma->pagelist,
dma->page_count * sizeof(*dma->pagelist),
DRM_MEM_PAGES);
dma->pagelist = temp_pagelist;
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += entry->seg_count << page_order;
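
This is the "more careful" page-list allocation promised in the commit
summary, an allocate-copy-commit pattern: build the grown pagelist on the
side, copy the old entries across, and free-and-swap only after every page
and buffer allocation has succeeded, so a mid-sequence failure leaves
dma->pagelist untouched. A minimal user-space sketch of the same discipline
(all names hypothetical):

    #include <stdlib.h>
    #include <string.h>

    /* Grow *list from old_n to new_n entries; on any failure the caller's
     * list is left exactly as it was. */
    static int grow_pagelist(unsigned long **list, size_t old_n, size_t new_n)
    {
            unsigned long *tmp = malloc(new_n * sizeof(*tmp));
            size_t i;

            if (!tmp)
                    return -1;              /* old list still intact */
            memcpy(tmp, *list, old_n * sizeof(*tmp));
            for (i = old_n; i < new_n; i++)
                    tmp[i] = 0;             /* fill new slots; free(tmp) and bail on error */
            free(*list);                    /* commit point: nothing can fail past here */
            *list = tmp;
            return 0;
    }
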
......@@ -715,7 +753,7 @@ int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
DRM_DEBUG( "count: %d\n", count );
DRM_DEBUG( "order: %d\n", order );
DRM_DEBUG( "size: %d\n", size );
DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
DRM_DEBUG( "alignment: %d\n", alignment );
DRM_DEBUG( "page_order: %d\n", page_order );
DRM_DEBUG( "total: %d\n", total );
......@@ -789,12 +827,6 @@ int DRM(addbufs_sg)( struct inode *inode, struct file *filp,
memset( buf->dev_private, 0, buf->dev_priv_size );
# if __HAVE_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
# endif
DRM_DEBUG( "buffer %d @ %p\n",
entry->buf_count, buf->address );
......
......@@ -35,7 +35,10 @@
#include "drmP.h"
#if __HAVE_CTX_BITMAP
#if !__HAVE_CTX_BITMAP
#error "__HAVE_CTX_BITMAP must be defined"
#endif
/* ================================================================
* Context bitmap support
......@@ -221,16 +224,11 @@ int DRM(setsareactx)(struct inode *inode, struct file *filp,
int DRM(context_switch)( drm_device_t *dev, int old, int new )
{
char buf[64];
if ( test_and_set_bit( 0, &dev->context_flag ) ) {
DRM_ERROR( "Reentering -- FIXME\n" );
return -EBUSY;
}
#if __HAVE_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
#endif
DRM_DEBUG( "Context switch from %d to %d\n", old, new );
......@@ -239,13 +237,6 @@ int DRM(context_switch)( drm_device_t *dev, int old, int new )
return 0;
}
if ( DRM(flags) & DRM_FLAG_NOCTX ) {
DRM(context_switch_complete)( dev, new );
} else {
sprintf( buf, "C %d %d\n", old, new );
DRM(write_string)( dev, buf );
}
return 0;
}
......@@ -261,11 +252,6 @@ int DRM(context_switch_complete)( drm_device_t *dev, int new )
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
#if __HAVE_DMA_HISTOGRAM
atomic_inc( &dev->histo.ctx[DRM(histogram_slot)(get_cycles()
- dev->ctx_start)] );
#endif
clear_bit( 0, &dev->context_flag );
wake_up( &dev->context_wait );
......@@ -406,375 +392,3 @@ int DRM(rmctx)( struct inode *inode, struct file *filp,
return 0;
}
#else /* __HAVE_CTX_BITMAP */
/* ================================================================
* Old-style context support
*/
int DRM(context_switch)(drm_device_t *dev, int old, int new)
{
char buf[64];
drm_queue_t *q;
#if 0
atomic_inc(&dev->total_ctx);
#endif
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return -EBUSY;
}
#if __HAVE_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new >= dev->queue_count) {
clear_bit(0, &dev->context_flag);
return -EINVAL;
}
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
q = dev->queuelist[new];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
atomic_dec(&q->use_count);
clear_bit(0, &dev->context_flag);
return -EINVAL;
}
if (DRM(flags) & DRM_FLAG_NOCTX) {
DRM(context_switch_complete)(dev, new);
} else {
sprintf(buf, "C %d %d\n", old, new);
DRM(write_string)(dev, buf);
}
atomic_dec(&q->use_count);
return 0;
}
int DRM(context_switch_complete)(drm_device_t *dev, int new)
{
drm_device_dma_t *dma = dev->dma;
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("Cannot free lock\n");
}
}
#if __HAVE_DMA_HISTOGRAM
atomic_inc(&dev->histo.ctx[DRM(histogram_slot)(get_cycles()
- dev->ctx_start)]);
#endif
clear_bit(0, &dev->context_flag);
wake_up_interruptible(&dev->context_wait);
return 0;
}
static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
{
DRM_DEBUG("\n");
if (atomic_read(&q->use_count) != 1
|| atomic_read(&q->finalization)
|| atomic_read(&q->block_count)) {
DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
atomic_read(&q->use_count),
atomic_read(&q->finalization),
atomic_read(&q->block_count));
}
atomic_set(&q->finalization, 0);
atomic_set(&q->block_count, 0);
atomic_set(&q->block_read, 0);
atomic_set(&q->block_write, 0);
atomic_set(&q->total_queued, 0);
atomic_set(&q->total_flushed, 0);
atomic_set(&q->total_locks, 0);
init_waitqueue_head(&q->write_queue);
init_waitqueue_head(&q->read_queue);
init_waitqueue_head(&q->flush_queue);
q->flags = ctx->flags;
DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
return 0;
}
/* drm_alloc_queue:
PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
disappear (so all deallocation must be done after IOCTLs are off)
2) dev->queue_count < dev->queue_slots
3) dev->queuelist[i].use_count == 0 and
dev->queuelist[i].finalization == 0 if i not in use
POST: 1) dev->queuelist[i].use_count == 1
2) dev->queue_count < dev->queue_slots */
static int DRM(alloc_queue)(drm_device_t *dev)
{
int i;
drm_queue_t *queue;
int oldslots;
int newslots;
/* Check for a free queue */
for (i = 0; i < dev->queue_count; i++) {
atomic_inc(&dev->queuelist[i]->use_count);
if (atomic_read(&dev->queuelist[i]->use_count) == 1
&& !atomic_read(&dev->queuelist[i]->finalization)) {
DRM_DEBUG("%d (free)\n", i);
return i;
}
atomic_dec(&dev->queuelist[i]->use_count);
}
/* Allocate a new queue */
down(&dev->struct_sem);
queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
memset(queue, 0, sizeof(*queue));
atomic_set(&queue->use_count, 1);
++dev->queue_count;
if (dev->queue_count >= dev->queue_slots) {
oldslots = dev->queue_slots * sizeof(*dev->queuelist);
if (!dev->queue_slots) dev->queue_slots = 1;
dev->queue_slots *= 2;
newslots = dev->queue_slots * sizeof(*dev->queuelist);
dev->queuelist = DRM(realloc)(dev->queuelist,
oldslots,
newslots,
DRM_MEM_QUEUES);
if (!dev->queuelist) {
up(&dev->struct_sem);
DRM_DEBUG("out of memory\n");
return -ENOMEM;
}
}
dev->queuelist[dev->queue_count-1] = queue;
up(&dev->struct_sem);
DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
return dev->queue_count - 1;
}
int DRM(resctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (copy_to_user(&res.contexts[i],
&i,
sizeof(i)))
return -EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
int DRM(addctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
/* Init kernel's context and get a new one. */
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
ctx.handle = DRM(alloc_queue)(dev);
}
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
DRM_DEBUG("%d\n", ctx.handle);
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
int DRM(modctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
drm_queue_t *q;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
if (DRM_BUFCOUNT(&q->waitlist)) {
atomic_dec(&q->use_count);
return -EBUSY;
}
q->flags = ctx.flags;
atomic_dec(&q->use_count);
return 0;
}
int DRM(getctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
drm_queue_t *q;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
ctx.flags = q->flags;
atomic_dec(&q->use_count);
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
int DRM(switchctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
}
int DRM(newctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
DRM(context_switch_complete)(dev, ctx.handle);
return 0;
}
int DRM(rmctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
drm_queue_t *q;
drm_buf_t *buf;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
atomic_inc(&q->finalization); /* Mark queue in finalization state */
atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
finalization) */
while (test_and_set_bit(0, &dev->interrupt_flag)) {
schedule();
if (signal_pending(current)) {
clear_bit(0, &dev->interrupt_flag);
return -EINTR;
}
}
/* Remove queued buffers */
while ((buf = DRM(waitlist_get)(&q->waitlist))) {
DRM(free_buffer)(dev, buf);
}
clear_bit(0, &dev->interrupt_flag);
/* Wakeup blocked processes */
wake_up_interruptible(&q->read_queue);
wake_up_interruptible(&q->write_queue);
wake_up_interruptible(&q->flush_queue);
/* Finalization over. Queue is made
available when both use_count and
finalization become 0, which won't
happen until all the waiting processes
stop waiting. */
atomic_dec(&q->finalization);
return 0;
}
#endif /* __HAVE_CTX_BITMAP */
......@@ -83,18 +83,20 @@ void DRM(dma_takedown)(drm_device_t *dev)
dma->bufs[i].buf_count,
dma->bufs[i].seg_count);
for (j = 0; j < dma->bufs[i].seg_count; j++) {
if (dma->bufs[i].seglist[j]) {
DRM(free_pages)(dma->bufs[i].seglist[j],
dma->bufs[i].page_order,
DRM_MEM_DMA);
}
}
DRM(free)(dma->bufs[i].seglist,
dma->bufs[i].seg_count
* sizeof(*dma->bufs[0].seglist),
DRM_MEM_SEGS);
}
if(dma->bufs[i].buf_count) {
for(j = 0; j < dma->bufs[i].buf_count; j++) {
if(dma->bufs[i].buflist[j].dev_private) {
if (dma->bufs[i].buf_count) {
for (j = 0; j < dma->bufs[i].buf_count; j++) {
if (dma->bufs[i].buflist[j].dev_private) {
DRM(free)(dma->bufs[i].buflist[j].dev_private,
dma->bufs[i].buflist[j].dev_priv_size,
DRM_MEM_BUFS);
......@@ -126,61 +128,6 @@ void DRM(dma_takedown)(drm_device_t *dev)
}
#if __HAVE_DMA_HISTOGRAM
/* This is slow, but is useful for debugging. */
int DRM(histogram_slot)(unsigned long count)
{
int value = DRM_DMA_HISTOGRAM_INITIAL;
int slot;
for (slot = 0;
slot < DRM_DMA_HISTOGRAM_SLOTS;
++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
if (count < value) return slot;
}
return DRM_DMA_HISTOGRAM_SLOTS - 1;
}
void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf)
{
cycles_t queued_to_dispatched;
cycles_t dispatched_to_completed;
cycles_t completed_to_freed;
int q2d, d2c, c2f, q2c, q2f;
if (buf->time_queued) {
queued_to_dispatched = (buf->time_dispatched
- buf->time_queued);
dispatched_to_completed = (buf->time_completed
- buf->time_dispatched);
completed_to_freed = (buf->time_freed
- buf->time_completed);
q2d = DRM(histogram_slot)(queued_to_dispatched);
d2c = DRM(histogram_slot)(dispatched_to_completed);
c2f = DRM(histogram_slot)(completed_to_freed);
q2c = DRM(histogram_slot)(queued_to_dispatched
+ dispatched_to_completed);
q2f = DRM(histogram_slot)(queued_to_dispatched
+ dispatched_to_completed
+ completed_to_freed);
atomic_inc(&dev->histo.total);
atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
atomic_inc(&dev->histo.completed_to_freed[c2f]);
atomic_inc(&dev->histo.queued_to_completed[q2c]);
atomic_inc(&dev->histo.queued_to_freed[q2f]);
}
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
}
#endif
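
For reference, the bucketing removed above was logarithmic: starting from
DRM_DMA_HISTOGRAM_INITIAL (10) and multiplying by 10 per slot, the nine slots
cover cycle counts below 10, 100, ... 10^9, with anything larger clamped into
the last slot. A condensed restatement of the deleted DRM(histogram_slot):

    /* Slot 0 holds counts below 10; each later slot raises the bound tenfold. */
    static int histogram_slot(unsigned long count)
    {
            unsigned long bound = 10;       /* DRM_DMA_HISTOGRAM_INITIAL */
            int slot;

            for (slot = 0; slot < 9; slot++, bound *= 10)
                    if (count < bound)
                            return slot;
            return 9 - 1;                   /* DRM_DMA_HISTOGRAM_SLOTS - 1 */
    }
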
void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
{
......@@ -190,9 +137,6 @@ void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
buf->pending = 0;
buf->filp = 0;
buf->used = 0;
#if __HAVE_DMA_HISTOGRAM
buf->time_completed = get_cycles();
#endif
if ( __HAVE_DMA_WAITQUEUE && waitqueue_active(&buf->dma_wait)) {
wake_up_interruptible(&buf->dma_wait);
......@@ -237,276 +181,6 @@ void DRM(reclaim_buffers)( struct file *filp )
#endif
/* GH: This is a big hack for now...
*/
#if __HAVE_OLD_DMA
void DRM(clear_next_buffer)(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
dma->next_buffer = NULL;
if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
wake_up_interruptible(&dma->next_queue->flush_queue);
}
dma->next_queue = NULL;
}
int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
{
int i;
int candidate = -1;
int j = jiffies;
if (!dev) {
DRM_ERROR("No device\n");
return -1;
}
if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
/* This only happens between the time the
interrupt is initialized and the time
the queues are initialized. */
return -1;
}
/* Doing "while locked" DMA? */
if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
return DRM_KERNEL_CONTEXT;
}
/* If there are buffers on the last_context
queue, and we have not been executing
this context very long, continue to
execute this context. */
if (dev->last_switch <= j
&& dev->last_switch + DRM_TIME_SLICE > j
&& DRM_WAITCOUNT(dev, dev->last_context)) {
return dev->last_context;
}
/* Otherwise, find a candidate */
for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
if (DRM_WAITCOUNT(dev, i)) {
candidate = dev->last_checked = i;
break;
}
}
if (candidate < 0) {
for (i = 0; i < dev->queue_count; i++) {
if (DRM_WAITCOUNT(dev, i)) {
candidate = dev->last_checked = i;
break;
}
}
}
if (wrapper
&& candidate >= 0
&& candidate != dev->last_context
&& dev->last_switch <= j
&& dev->last_switch + DRM_TIME_SLICE > j) {
if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
del_timer(&dev->timer);
dev->timer.function = wrapper;
dev->timer.data = (unsigned long)dev;
dev->timer.expires = dev->last_switch+DRM_TIME_SLICE;
add_timer(&dev->timer);
}
return -1;
}
return candidate;
}
int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int i;
drm_queue_t *q;
drm_buf_t *buf;
int idx;
int while_locked = 0;
drm_device_dma_t *dma = dev->dma;
DECLARE_WAITQUEUE(entry, current);
DRM_DEBUG("%d\n", d->send_count);
if (d->flags & _DRM_DMA_WHILE_LOCKED) {
int context = dev->lock.hw_lock->lock;
if (!_DRM_LOCK_IS_HELD(context)) {
DRM_ERROR("No lock held during \"while locked\""
" request\n");
return -EINVAL;
}
if (d->context != _DRM_LOCKING_CONTEXT(context)
&& _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
DRM_ERROR("Lock held by %d while %d makes"
" \"while locked\" request\n",
_DRM_LOCKING_CONTEXT(context),
d->context);
return -EINVAL;
}
q = dev->queuelist[DRM_KERNEL_CONTEXT];
while_locked = 1;
} else {
q = dev->queuelist[d->context];
}
atomic_inc(&q->use_count);
if (atomic_read(&q->block_write)) {
add_wait_queue(&q->write_queue, &entry);
atomic_inc(&q->block_count);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (!atomic_read(&q->block_write)) break;
schedule();
if (signal_pending(current)) {
atomic_dec(&q->use_count);
remove_wait_queue(&q->write_queue, &entry);
return -EINTR;
}
}
atomic_dec(&q->block_count);
current->state = TASK_RUNNING;
remove_wait_queue(&q->write_queue, &entry);
}
for (i = 0; i < d->send_count; i++) {
idx = d->send_indices[i];
if (idx < 0 || idx >= dma->buf_count) {
atomic_dec(&q->use_count);
DRM_ERROR("Index %d (of %d max)\n",
d->send_indices[i], dma->buf_count - 1);
return -EINVAL;
}
buf = dma->buflist[ idx ];
if (buf->filp != filp) {
atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer not owned\n",
current->pid);
return -EINVAL;
}
if (buf->list != DRM_LIST_NONE) {
atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer %d on list %d\n",
current->pid, buf->idx, buf->list);
}
buf->used = d->send_sizes[i];
buf->while_locked = while_locked;
buf->context = d->context;
if (!buf->used) {
DRM_ERROR("Queueing 0 length buffer\n");
}
if (buf->pending) {
atomic_dec(&q->use_count);
DRM_ERROR("Queueing pending buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
return -EINVAL;
}
if (buf->waiting) {
atomic_dec(&q->use_count);
DRM_ERROR("Queueing waiting buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
return -EINVAL;
}
buf->waiting = 1;
if (atomic_read(&q->use_count) == 1
|| atomic_read(&q->finalization)) {
DRM(free_buffer)(dev, buf);
} else {
DRM(waitlist_put)(&q->waitlist, buf);
atomic_inc(&q->total_queued);
}
}
atomic_dec(&q->use_count);
return 0;
}
static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
int order)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int i;
drm_buf_t *buf;
drm_device_dma_t *dma = dev->dma;
for (i = d->granted_count; i < d->request_count; i++) {
buf = DRM(freelist_get)(&dma->bufs[order].freelist,
d->flags & _DRM_DMA_WAIT);
if (!buf) break;
if (buf->pending || buf->waiting) {
DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n",
buf->idx,
buf->filp,
buf->waiting,
buf->pending);
}
buf->filp = filp;
if (copy_to_user(&d->request_indices[i],
&buf->idx,
sizeof(buf->idx)))
return -EFAULT;
if (copy_to_user(&d->request_sizes[i],
&buf->total,
sizeof(buf->total)))
return -EFAULT;
++d->granted_count;
}
return 0;
}
int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
{
int order;
int retcode = 0;
int tmp_order;
order = DRM(order)(dma->request_size);
dma->granted_count = 0;
retcode = DRM(dma_get_buffers_of_order)(filp, dma, order);
if (dma->granted_count < dma->request_count
&& (dma->flags & _DRM_DMA_SMALLER_OK)) {
for (tmp_order = order - 1;
!retcode
&& dma->granted_count < dma->request_count
&& tmp_order >= DRM_MIN_ORDER;
--tmp_order) {
retcode = DRM(dma_get_buffers_of_order)(filp, dma,
tmp_order);
}
}
if (dma->granted_count < dma->request_count
&& (dma->flags & _DRM_DMA_LARGER_OK)) {
for (tmp_order = order + 1;
!retcode
&& dma->granted_count < dma->request_count
&& tmp_order <= DRM_MAX_ORDER;
++tmp_order) {
retcode = DRM(dma_get_buffers_of_order)(filp, dma,
tmp_order);
}
}
return 0;
}
#endif /* __HAVE_OLD_DMA */
#if __HAVE_DMA_IRQ
......
......@@ -84,8 +84,11 @@
#ifndef __HAVE_SG
#define __HAVE_SG 0
#endif
#ifndef __HAVE_KERNEL_CTX_SWITCH
#define __HAVE_KERNEL_CTX_SWITCH 0
#ifndef __HAVE_DRIVER_FOPS_READ
#define __HAVE_DRIVER_FOPS_READ 0
#endif
#ifndef __HAVE_DRIVER_FOPS_POLL
#define __HAVE_DRIVER_FOPS_POLL 0
#endif
#ifndef DRIVER_PREINIT
......@@ -121,9 +124,9 @@ static struct file_operations DRM(fops) = { \
.release = DRM(release), \
.ioctl = DRM(ioctl), \
.mmap = DRM(mmap), \
.read = DRM(read), \
.fasync = DRM(fasync), \
.poll = DRM(poll), \
.read = DRM(read), \
}
#endif
......@@ -167,8 +170,8 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(block), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(unblock), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(noop), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(noop), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
......@@ -192,7 +195,13 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
#if __HAVE_DMA_FLUSH
/* Gamma only, really */
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },
#else
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(noop), 1, 0 },
#endif
#if __HAVE_DMA
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
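
BLOCK and UNBLOCK above (and FINISH on drivers without __HAVE_DMA_FLUSH) now
point at DRM(noop): retiring an ioctl by wiring its slot to a do-nothing
handler keeps the ioctl numbers stable, so existing userspace can still issue
the call and simply gets success back. A tiny sketch of the dispatch-table
idiom (hypothetical table):

    typedef int (*ioctl_fn)(unsigned int cmd, unsigned long arg);

    static int ioctl_noop(unsigned int cmd, unsigned long arg)
    {
            return 0;                       /* accepted, intentionally ignored */
    }

    static ioctl_fn ioctl_table[] = {
            [0] = ioctl_noop,               /* a retired ioctl keeps its slot */
            /* ... live entries ... */
    };
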
......@@ -307,7 +316,6 @@ static int DRM(setup)( drm_device_t *dev )
if(dev->maplist == NULL) return -ENOMEM;
memset(dev->maplist, 0, sizeof(*dev->maplist));
INIT_LIST_HEAD(&dev->maplist->head);
dev->map_count = 0;
dev->vmalist = NULL;
dev->sigdata.lock = dev->lock.hw_lock = NULL;
......@@ -475,7 +483,9 @@ static int DRM(takedown)( drm_device_t *dev )
#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
if ( dev->queuelist ) {
for ( i = 0 ; i < dev->queue_count ; i++ ) {
#if __HAVE_DMA_WAITLIST
DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
#endif
if ( dev->queuelist[i] ) {
DRM(free)( dev->queuelist[i],
sizeof(*dev->queuelist[0]),
......@@ -764,7 +774,7 @@ int DRM(release)( struct inode *inode, struct file *filp )
DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
current->pid, (long)dev->device, dev->open_count );
if ( dev->lock.hw_lock &&
if ( priv->lock_count && dev->lock.hw_lock &&
_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
dev->lock.filp == filp ) {
DRM_DEBUG( "File %p released, freeing lock for context %d\n",
......@@ -782,7 +792,7 @@ int DRM(release)( struct inode *inode, struct file *filp )
server. */
}
#if __HAVE_RELEASE
else if ( dev->lock.hw_lock ) {
else if ( priv->lock_count && dev->lock.hw_lock ) {
/* The lock is required to reclaim buffers */
DECLARE_WAITQUEUE( entry, current );
......@@ -802,9 +812,6 @@ int DRM(release)( struct inode *inode, struct file *filp )
break; /* Got lock */
}
/* Contention */
#if 0
atomic_inc( &dev->total_sleeps );
#endif
schedule();
if ( signal_pending( current ) ) {
retcode = -ERESTARTSYS;
......@@ -925,11 +932,8 @@ int DRM(lock)( struct inode *inode, struct file *filp,
#if __HAVE_MULTIPLE_DMA_QUEUES
drm_queue_t *q;
#endif
#if __HAVE_DMA_HISTOGRAM
cycles_t start;
dev->lck_start = start = get_cycles();
#endif
++priv->lock_count;
if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
return -EFAULT;
......@@ -1008,20 +1012,11 @@ int DRM(lock)( struct inode *inode, struct file *filp,
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
DRIVER_DMA_QUIESCENT();
}
#endif
#if __HAVE_KERNEL_CTX_SWITCH
if ( dev->last_context != lock.context ) {
DRM(context_switch)(dev, dev->last_context,
lock.context);
}
#endif
}
DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
#if __HAVE_DMA_HISTOGRAM
atomic_inc(&dev->histo.lacq[DRM(histogram_slot)(get_cycles()-start)]);
#endif
return ret;
}
......@@ -1044,25 +1039,6 @@ int DRM(unlock)( struct inode *inode, struct file *filp,
atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
#if __HAVE_KERNEL_CTX_SWITCH
/* We no longer really hold it, but if we are the next
* agent to request it then we should just be able to
* take it immediately and not eat the ioctl.
*/
dev->lock.filp = 0;
{
__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
unsigned int old, new, prev, ctx;
ctx = lock.context;
do {
old = *plock;
new = ctx;
prev = cmpxchg(plock, old, new);
} while (prev != old);
}
wake_up_interruptible(&dev->lock.lock_queue);
#else
DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT );
#if __HAVE_DMA_SCHEDULE
......@@ -1077,7 +1053,6 @@ int DRM(unlock)( struct inode *inode, struct file *filp,
DRM_ERROR( "\n" );
}
}
#endif /* !__HAVE_KERNEL_CTX_SWITCH */
unblock_all_signals();
return 0;
......
......@@ -56,6 +56,7 @@ int DRM(open_helper)(struct inode *inode, struct file *filp, drm_device_t *dev)
priv->dev = dev;
priv->ioctl_count = 0;
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
down(&dev->struct_sem);
if (!dev->file_last) {
......@@ -111,98 +112,17 @@ int DRM(fasync)(int fd, struct file *filp, int on)
return 0;
}
/* The drm_read and drm_write_string code (especially that which manages
   the circular buffer) is based on Alessandro Rubini's LINUX DEVICE
   DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int left;
int avail;
int send;
int cur;
DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
while (dev->buf_rp == dev->buf_wp) {
DRM_DEBUG(" sleeping\n");
if (filp->f_flags & O_NONBLOCK) {
return -EAGAIN;
}
interruptible_sleep_on(&dev->buf_readers);
if (signal_pending(current)) {
DRM_DEBUG(" interrupted\n");
return -ERESTARTSYS;
}
DRM_DEBUG(" awake\n");
}
left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
avail = DRM_BSZ - left;
send = DRM_MIN(avail, count);
while (send) {
if (dev->buf_wp > dev->buf_rp) {
cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
} else {
cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
}
if (copy_to_user(buf, dev->buf_rp, cur))
return -EFAULT;
dev->buf_rp += cur;
if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
send -= cur;
}
wake_up_interruptible(&dev->buf_writers);
return DRM_MIN(avail, count);
}
int DRM(write_string)(drm_device_t *dev, const char *s)
#if !__HAVE_DRIVER_FOPS_POLL
unsigned int DRM(poll)(struct file *filp, struct poll_table_struct *wait)
{
int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
int send = strlen(s);
int count;
DRM_DEBUG("%d left, %d to send (%p, %p)\n",
left, send, dev->buf_rp, dev->buf_wp);
if (left == 1 || dev->buf_wp != dev->buf_rp) {
DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
left,
dev->buf_wp,
dev->buf_rp);
}
while (send) {
if (dev->buf_wp >= dev->buf_rp) {
count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
if (count == left) --count; /* Leave a hole */
} else {
count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
}
strncpy(dev->buf_wp, s, count);
dev->buf_wp += count;
if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
send -= count;
}
if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);
DRM_DEBUG("waking\n");
wake_up_interruptible(&dev->buf_readers);
return 0;
}
#endif
unsigned int DRM(poll)(struct file *filp, struct poll_table_struct *wait)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
poll_wait(filp, &dev->buf_readers, wait);
if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM;
#if !__HAVE_DRIVER_FOPS_READ
ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off)
{
return 0;
}
#endif
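
The deleted read/write_string code above managed a Rubini-style ring:
buf_rp == buf_wp means empty, and the writer always leaves one byte unused
(the "leave a hole" comment) so a full ring stays distinguishable from an
empty one. Its index arithmetic reduces to modular differences; a small
sketch with a hypothetical capacity BSZ:

    #define BSZ 1024                        /* ring capacity; one slot stays unused */

    /* Bytes the reader may consume: distance from rp forward to wp, mod BSZ. */
    static int ring_data(int rp, int wp)
    {
            return (wp - rp + BSZ) % BSZ;
    }

    /* Bytes the writer may add without making the ring look empty again. */
    static int ring_space(int rp, int wp)
    {
            return BSZ - 1 - ring_data(rp, wp);
    }
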
......@@ -49,11 +49,6 @@ static void DRM(parse_option)(char *s)
for (c = s; *c && *c != ':'; c++); /* find : or \0 */
if (*c) r = c + 1; else r = NULL; /* remember remainder */
*c = '\0'; /* terminate */
if (!strcmp(s, "noctx")) {
DRM(flags) |= DRM_FLAG_NOCTX;
DRM_INFO("Server-mediated context switching OFF\n");
return;
}
if (!strcmp(s, "debug")) {
DRM(flags) |= DRM_FLAG_DEBUG;
DRM_INFO("Debug messages ON\n");
......
......@@ -196,7 +196,7 @@ int DRM(getmap)( struct inode *inode, struct file *filp,
idx = map.offset;
down(&dev->struct_sem);
if (idx < 0 || idx >= dev->map_count) {
if (idx < 0) {
up(&dev->struct_sem);
return -EINVAL;
}
......
......@@ -31,19 +31,13 @@
#include "drmP.h"
int DRM(block)(struct inode *inode, struct file *filp, unsigned int cmd,
int DRM(noop)(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
DRM_DEBUG("\n");
return 0;
}
int DRM(unblock)(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
DRM_DEBUG("\n");
return 0;
}
int DRM(lock_take)(__volatile__ unsigned int *lock, unsigned int context)
{
......@@ -108,113 +102,6 @@ int DRM(lock_free)(drm_device_t *dev,
return 0;
}
static int DRM(flush_queue)(drm_device_t *dev, int context)
{
DECLARE_WAITQUEUE(entry, current);
int ret = 0;
drm_queue_t *q = dev->queuelist[context];
DRM_DEBUG("\n");
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) > 1) {
atomic_inc(&q->block_write);
add_wait_queue(&q->flush_queue, &entry);
atomic_inc(&q->block_count);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (!DRM_BUFCOUNT(&q->waitlist)) break;
schedule();
if (signal_pending(current)) {
ret = -EINTR; /* Can't restart */
break;
}
}
atomic_dec(&q->block_count);
current->state = TASK_RUNNING;
remove_wait_queue(&q->flush_queue, &entry);
}
atomic_dec(&q->use_count);
/* NOTE: block_write is still incremented!
Use drm_flush_unlock_queue to decrement. */
return ret;
}
static int DRM(flush_unblock_queue)(drm_device_t *dev, int context)
{
drm_queue_t *q = dev->queuelist[context];
DRM_DEBUG("\n");
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) > 1) {
if (atomic_read(&q->block_write)) {
atomic_dec(&q->block_write);
wake_up_interruptible(&q->write_queue);
}
}
atomic_dec(&q->use_count);
return 0;
}
int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
drm_lock_flags_t flags)
{
int ret = 0;
int i;
DRM_DEBUG("\n");
if (flags & _DRM_LOCK_FLUSH) {
ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT);
if (!ret) ret = DRM(flush_queue)(dev, context);
}
if (flags & _DRM_LOCK_FLUSH_ALL) {
for (i = 0; !ret && i < dev->queue_count; i++) {
ret = DRM(flush_queue)(dev, i);
}
}
return ret;
}
int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags)
{
int ret = 0;
int i;
DRM_DEBUG("\n");
if (flags & _DRM_LOCK_FLUSH) {
ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT);
if (!ret) ret = DRM(flush_unblock_queue)(dev, context);
}
if (flags & _DRM_LOCK_FLUSH_ALL) {
for (i = 0; !ret && i < dev->queue_count; i++) {
ret = DRM(flush_unblock_queue)(dev, i);
}
}
return ret;
}
int DRM(finish)(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int ret = 0;
drm_lock_t lock;
DRM_DEBUG("\n");
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
DRM(flush_unblock)(dev, lock.context, lock.flags);
return ret;
}
/* If we get here, it means that the process has called DRM_IOCTL_LOCK
without calling DRM_IOCTL_UNLOCK.
......
......@@ -32,186 +32,47 @@
#include <linux/config.h>
#include "drmP.h"
typedef struct drm_mem_stats {
const char *name;
int succeed_count;
int free_count;
int fail_count;
unsigned long bytes_allocated;
unsigned long bytes_freed;
} drm_mem_stats_t;
/* Cut down version of drm_memory_debug.h, which used to be called
* drm_memory.h. If you want the debug functionality, change 0 to 1
* below.
*/
#define DEBUG_MEMORY 0
static spinlock_t DRM(mem_lock) = SPIN_LOCK_UNLOCKED;
static unsigned long DRM(ram_available) = 0; /* In pages */
static unsigned long DRM(ram_used) = 0;
static drm_mem_stats_t DRM(mem_stats)[] = {
[DRM_MEM_DMA] = { "dmabufs" },
[DRM_MEM_SAREA] = { "sareas" },
[DRM_MEM_DRIVER] = { "driver" },
[DRM_MEM_MAGIC] = { "magic" },
[DRM_MEM_IOCTLS] = { "ioctltab" },
[DRM_MEM_MAPS] = { "maplist" },
[DRM_MEM_VMAS] = { "vmalist" },
[DRM_MEM_BUFS] = { "buflist" },
[DRM_MEM_SEGS] = { "seglist" },
[DRM_MEM_PAGES] = { "pagelist" },
[DRM_MEM_FILES] = { "files" },
[DRM_MEM_QUEUES] = { "queues" },
[DRM_MEM_CMDS] = { "commands" },
[DRM_MEM_MAPPINGS] = { "mappings" },
[DRM_MEM_BUFLISTS] = { "buflists" },
[DRM_MEM_AGPLISTS] = { "agplist" },
[DRM_MEM_SGLISTS] = { "sglist" },
[DRM_MEM_TOTALAGP] = { "totalagp" },
[DRM_MEM_BOUNDAGP] = { "boundagp" },
[DRM_MEM_CTXBITMAP] = { "ctxbitmap"},
[DRM_MEM_STUB] = { "stub" },
{ NULL, 0, } /* Last entry must be null */
};
#if DEBUG_MEMORY
#include "drm_memory_debug.h"
#else
void DRM(mem_init)(void)
{
drm_mem_stats_t *mem;
struct sysinfo si;
for (mem = DRM(mem_stats); mem->name; ++mem) {
mem->succeed_count = 0;
mem->free_count = 0;
mem->fail_count = 0;
mem->bytes_allocated = 0;
mem->bytes_freed = 0;
}
si_meminfo(&si);
DRM(ram_available) = si.totalram;
DRM(ram_used) = 0;
}
/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
static int DRM(_mem_info)(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
drm_mem_stats_t *pt;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*eof = 0;
*start = &buf[offset];
DRM_PROC_PRINT(" total counts "
" | outstanding \n");
DRM_PROC_PRINT("type alloc freed fail bytes freed"
" | allocs bytes\n\n");
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
"system", 0, 0, 0,
DRM(ram_available) << (PAGE_SHIFT - 10));
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
"locked", 0, 0, 0, DRM(ram_used) >> 10);
DRM_PROC_PRINT("\n");
for (pt = DRM(mem_stats); pt->name; pt++) {
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
pt->name,
pt->succeed_count,
pt->free_count,
pt->fail_count,
pt->bytes_allocated,
pt->bytes_freed,
pt->succeed_count - pt->free_count,
(long)pt->bytes_allocated
- (long)pt->bytes_freed);
}
if (len > request + offset) return request;
*eof = 1;
return len - offset;
}
int DRM(mem_info)(char *buf, char **start, off_t offset,
int len, int *eof, void *data)
{
int ret;
spin_lock(&DRM(mem_lock));
ret = DRM(_mem_info)(buf, start, offset, len, eof, data);
spin_unlock(&DRM(mem_lock));
return ret;
return 0;
}
void *DRM(alloc)(size_t size, int area)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
return NULL;
}
if (!(pt = kmalloc(size, GFP_KERNEL))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[area].fail_count;
spin_unlock(&DRM(mem_lock));
return NULL;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[area].succeed_count;
DRM(mem_stats)[area].bytes_allocated += size;
spin_unlock(&DRM(mem_lock));
return pt;
return kmalloc(size, GFP_KERNEL);
}
void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size, int area)
{
void *pt;
if (!(pt = DRM(alloc)(size, area))) return NULL;
if (!(pt = kmalloc(size, GFP_KERNEL))) return NULL;
if (oldpt && oldsize) {
memcpy(pt, oldpt, oldsize);
DRM(free)(oldpt, oldsize, area);
kfree(oldpt);
}
return pt;
}
char *DRM(strdup)(const char *s, int area)
{
char *pt;
int length = s ? strlen(s) : 0;
if (!(pt = DRM(alloc)(length+1, area))) return NULL;
strcpy(pt, s);
return pt;
}
void DRM(strfree)(const char *s, int area)
{
unsigned int size;
if (!s) return;
size = 1 + (s ? strlen(s) : 0);
DRM(free)((void *)s, size, area);
}
void DRM(free)(void *pt, size_t size, int area)
{
int alloc_count;
int free_count;
if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
else kfree(pt);
spin_lock(&DRM(mem_lock));
DRM(mem_stats)[area].bytes_freed += size;
free_count = ++DRM(mem_stats)[area].free_count;
alloc_count = DRM(mem_stats)[area].succeed_count;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
kfree(pt);
}
unsigned long DRM(alloc_pages)(int order, int area)
......@@ -221,29 +82,11 @@ unsigned long DRM(alloc_pages)(int order, int area)
unsigned long addr;
unsigned int sz;
spin_lock(&DRM(mem_lock));
if ((DRM(ram_used) >> PAGE_SHIFT)
> (DRM_RAM_PERCENT * DRM(ram_available)) / 100) {
spin_unlock(&DRM(mem_lock));
return 0;
}
spin_unlock(&DRM(mem_lock));
address = __get_free_pages(GFP_KERNEL, order);
if (!address) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[area].fail_count;
spin_unlock(&DRM(mem_lock));
if (!address)
return 0;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[area].succeed_count;
DRM(mem_stats)[area].bytes_allocated += bytes;
DRM(ram_used) += bytes;
spin_unlock(&DRM(mem_lock));
/* Zero outside the lock */
/* Zero */
memset((void *)address, 0, bytes);
/* Reserve */
......@@ -259,207 +102,56 @@ unsigned long DRM(alloc_pages)(int order, int area)
void DRM(free_pages)(unsigned long address, int order, int area)
{
unsigned long bytes = PAGE_SIZE << order;
int alloc_count;
int free_count;
unsigned long addr;
unsigned int sz;
if (!address) {
DRM_MEM_ERROR(area, "Attempt to free address 0\n");
} else {
if (!address)
return;
/* Unreserve */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
}
free_pages(address, order);
}
spin_lock(&DRM(mem_lock));
free_count = ++DRM(mem_stats)[area].free_count;
alloc_count = DRM(mem_stats)[area].succeed_count;
DRM(mem_stats)[area].bytes_freed += bytes;
DRM(ram_used) -= bytes;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(area,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
free_pages(address, order);
}
void *DRM(ioremap)(unsigned long offset, unsigned long size)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = ioremap(offset, size))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&DRM(mem_lock));
return NULL;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&DRM(mem_lock));
return pt;
return ioremap(offset, size);
}
void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = ioremap_nocache(offset, size))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&DRM(mem_lock));
return NULL;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&DRM(mem_lock));
return pt;
return ioremap_nocache(offset, size);
}
void DRM(ioremapfree)(void *pt, unsigned long size)
{
int alloc_count;
int free_count;
if (!pt)
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Attempt to free NULL pointer\n");
else
iounmap(pt);
spin_lock(&DRM(mem_lock));
DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
#if __REALLY_HAVE_AGP
agp_memory *DRM(alloc_agp)(int pages, u32 type)
{
agp_memory *handle;
if (!pages) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
return NULL;
}
if ((handle = DRM(agp_allocate_memory)(pages, type))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
+= pages << PAGE_SHIFT;
spin_unlock(&DRM(mem_lock));
return handle;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
spin_unlock(&DRM(mem_lock));
return NULL;
return DRM(agp_allocate_memory)(pages, type);
}
int DRM(free_agp)(agp_memory *handle, int pages)
{
int alloc_count;
int free_count;
int retval = -EINVAL;
if (!handle) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
"Attempt to free NULL AGP handle\n");
return retval;
}
if (DRM(agp_free_memory)(handle)) {
spin_lock(&DRM(mem_lock));
free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
+= pages << PAGE_SHIFT;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
return 0;
}
return retval;
return DRM(agp_free_memory)(handle) ? 0 : -EINVAL;
}
int DRM(bind_agp)(agp_memory *handle, unsigned int start)
{
int retcode = -EINVAL;
if (!handle) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
"Attempt to bind NULL AGP handle\n");
return retcode;
}
if (!(retcode = DRM(agp_bind_memory)(handle, start))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
+= handle->page_count << PAGE_SHIFT;
spin_unlock(&DRM(mem_lock));
return retcode;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
spin_unlock(&DRM(mem_lock));
return retcode;
return DRM(agp_bind_memory)(handle, start);
}
int DRM(unbind_agp)(agp_memory *handle)
{
int alloc_count;
int free_count;
int retcode = -EINVAL;
if (!handle) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
"Attempt to unbind NULL AGP handle\n");
return retcode;
}
if ((retcode = DRM(agp_unbind_memory)(handle))) return retcode;
spin_lock(&DRM(mem_lock));
free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
+= handle->page_count << PAGE_SHIFT;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
return retcode;
return DRM(agp_unbind_memory)(handle);
}
#endif
#endif /* agp */
#endif /* debug_memory */
/* drm_memory.h -- Memory management wrappers for DRM -*- linux-c -*-
* Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
#include <linux/config.h>
#include "drmP.h"
typedef struct drm_mem_stats {
const char *name;
int succeed_count;
int free_count;
int fail_count;
unsigned long bytes_allocated;
unsigned long bytes_freed;
} drm_mem_stats_t;
static spinlock_t DRM(mem_lock) = SPIN_LOCK_UNLOCKED;
static unsigned long DRM(ram_available) = 0; /* In pages */
static unsigned long DRM(ram_used) = 0;
static drm_mem_stats_t DRM(mem_stats)[] = {
[DRM_MEM_DMA] = { "dmabufs" },
[DRM_MEM_SAREA] = { "sareas" },
[DRM_MEM_DRIVER] = { "driver" },
[DRM_MEM_MAGIC] = { "magic" },
[DRM_MEM_IOCTLS] = { "ioctltab" },
[DRM_MEM_MAPS] = { "maplist" },
[DRM_MEM_VMAS] = { "vmalist" },
[DRM_MEM_BUFS] = { "buflist" },
[DRM_MEM_SEGS] = { "seglist" },
[DRM_MEM_PAGES] = { "pagelist" },
[DRM_MEM_FILES] = { "files" },
[DRM_MEM_QUEUES] = { "queues" },
[DRM_MEM_CMDS] = { "commands" },
[DRM_MEM_MAPPINGS] = { "mappings" },
[DRM_MEM_BUFLISTS] = { "buflists" },
[DRM_MEM_AGPLISTS] = { "agplist" },
[DRM_MEM_SGLISTS] = { "sglist" },
[DRM_MEM_TOTALAGP] = { "totalagp" },
[DRM_MEM_BOUNDAGP] = { "boundagp" },
[DRM_MEM_CTXBITMAP] = { "ctxbitmap"},
[DRM_MEM_STUB] = { "stub" },
{ NULL, 0, } /* Last entry must be null */
};
void DRM(mem_init)(void)
{
drm_mem_stats_t *mem;
struct sysinfo si;
for (mem = DRM(mem_stats); mem->name; ++mem) {
mem->succeed_count = 0;
mem->free_count = 0;
mem->fail_count = 0;
mem->bytes_allocated = 0;
mem->bytes_freed = 0;
}
si_meminfo(&si);
DRM(ram_available) = si.totalram;
DRM(ram_used) = 0;
}
/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
static int DRM(_mem_info)(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
drm_mem_stats_t *pt;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*eof = 0;
*start = &buf[offset];
DRM_PROC_PRINT(" total counts "
" | outstanding \n");
DRM_PROC_PRINT("type alloc freed fail bytes freed"
" | allocs bytes\n\n");
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
"system", 0, 0, 0,
DRM(ram_available) << (PAGE_SHIFT - 10));
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
"locked", 0, 0, 0, DRM(ram_used) >> 10);
DRM_PROC_PRINT("\n");
for (pt = DRM(mem_stats); pt->name; pt++) {
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
pt->name,
pt->succeed_count,
pt->free_count,
pt->fail_count,
pt->bytes_allocated,
pt->bytes_freed,
pt->succeed_count - pt->free_count,
(long)pt->bytes_allocated
- (long)pt->bytes_freed);
}
if (len > request + offset) return request;
*eof = 1;
return len - offset;
}
int DRM(mem_info)(char *buf, char **start, off_t offset,
int len, int *eof, void *data)
{
int ret;
spin_lock(&DRM(mem_lock));
ret = DRM(_mem_info)(buf, start, offset, len, eof, data);
spin_unlock(&DRM(mem_lock));
return ret;
}
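The handlers above all follow the 2.4-era read_proc calling convention: data for the current read begins at *start, at most DRM_PROC_LIMIT bytes are produced, and *eof signals completion. A condensed sketch of that return protocol, using the same parameter names as the handlers above (illustrative only, not a new handler):
/* Sketch of the read_proc return protocol used by every
 * handler in this file. 'len' counts the bytes formatted so
 * far; 'request' is how many bytes the caller asked for. */
*start = &buf[offset];		/* this read's data starts here */
if (len > request + offset)	/* more left than was requested */
	return request;		/* caller will read() again */
*eof = 1;			/* everything fit in one pass */
return len - offset;		/* bytes produced past 'offset' */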
void *DRM(alloc)(size_t size, int area)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
return NULL;
}
if (!(pt = kmalloc(size, GFP_KERNEL))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[area].fail_count;
spin_unlock(&DRM(mem_lock));
return NULL;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[area].succeed_count;
DRM(mem_stats)[area].bytes_allocated += size;
spin_unlock(&DRM(mem_lock));
return pt;
}
void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size, int area)
{
void *pt;
if (!(pt = DRM(alloc)(size, area))) return NULL;
if (oldpt && oldsize) {
memcpy(pt, oldpt, oldsize);
DRM(free)(oldpt, oldsize, area);
}
return pt;
}
void DRM(free)(void *pt, size_t size, int area)
{
int alloc_count;
int free_count;
if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
else kfree(pt);
spin_lock(&DRM(mem_lock));
DRM(mem_stats)[area].bytes_freed += size;
free_count = ++DRM(mem_stats)[area].free_count;
alloc_count = DRM(mem_stats)[area].succeed_count;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
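DRM(alloc) and DRM(free) form an accounted kmalloc/kfree pair: the caller passes the same size and area tag to both sides so the per-area counters stay balanced and the excess-free check above stays quiet. A minimal usage sketch (the drm_example_t type and the surrounding caller are hypothetical):
/* Hypothetical caller: accounted driver-private allocation.
 * The size and area passed to DRM(free) must match the ones
 * given to DRM(alloc). */
drm_example_t *priv = DRM(alloc)(sizeof(*priv), DRM_MEM_DRIVER);
if (!priv)
	return -ENOMEM;
/* ... initialize and use priv ... */
DRM(free)(priv, sizeof(*priv), DRM_MEM_DRIVER);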
unsigned long DRM(alloc_pages)(int order, int area)
{
unsigned long address;
unsigned long bytes = PAGE_SIZE << order;
unsigned long addr;
unsigned int sz;
spin_lock(&DRM(mem_lock));
if ((DRM(ram_used) >> PAGE_SHIFT)
> (DRM_RAM_PERCENT * DRM(ram_available)) / 100) {
spin_unlock(&DRM(mem_lock));
return 0;
}
spin_unlock(&DRM(mem_lock));
address = __get_free_pages(GFP_KERNEL, order);
if (!address) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[area].fail_count;
spin_unlock(&DRM(mem_lock));
return 0;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[area].succeed_count;
DRM(mem_stats)[area].bytes_allocated += bytes;
DRM(ram_used) += bytes;
spin_unlock(&DRM(mem_lock));
/* Zero outside the lock */
memset((void *)address, 0, bytes);
/* Reserve the pages so the VM will not swap or reclaim them
   while they may be mapped into userspace */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
SetPageReserved(virt_to_page(addr));
}
return address;
}
void DRM(free_pages)(unsigned long address, int order, int area)
{
unsigned long bytes = PAGE_SIZE << order;
int alloc_count;
int free_count;
unsigned long addr;
unsigned int sz;
if (!address) {
DRM_MEM_ERROR(area, "Attempt to free address 0\n");
} else {
/* Unreserve */
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
}
free_pages(address, order);
}
spin_lock(&DRM(mem_lock));
free_count = ++DRM(mem_stats)[area].free_count;
alloc_count = DRM(mem_stats)[area].succeed_count;
DRM(mem_stats)[area].bytes_freed += bytes;
DRM(ram_used) -= bytes;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(area,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
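DRM(alloc_pages) deals in power-of-two orders, refuses requests once accounted pages pass DRM_RAM_PERCENT of system RAM, and reserves every page so the VM leaves them in place while they may be mapped to userspace. A hedged pairing sketch (order 2, i.e. four pages, chosen arbitrarily):
/* Sketch: allocate a four-page (order-2) region and release
 * it again; order and area must match on the free side so
 * DRM(ram_used) and the per-area counters balance. */
unsigned long base = DRM(alloc_pages)(2, DRM_MEM_DMA);
if (!base)
	return -ENOMEM;	/* over the RAM cap, or out of memory */
/* ... point the hardware at the pages ... */
DRM(free_pages)(base, 2, DRM_MEM_DMA);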
void *DRM(ioremap)(unsigned long offset, unsigned long size)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = ioremap(offset, size))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&DRM(mem_lock));
return NULL;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&DRM(mem_lock));
return pt;
}
void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = ioremap_nocache(offset, size))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&DRM(mem_lock));
return NULL;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&DRM(mem_lock));
return pt;
}
void DRM(ioremapfree)(void *pt, unsigned long size)
{
int alloc_count;
int free_count;
if (!pt)
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Attempt to free NULL pointer\n");
else
iounmap(pt);
spin_lock(&DRM(mem_lock));
DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
#if __REALLY_HAVE_AGP
agp_memory *DRM(alloc_agp)(int pages, u32 type)
{
agp_memory *handle;
if (!pages) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
return NULL;
}
if ((handle = DRM(agp_allocate_memory)(pages, type))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
+= pages << PAGE_SHIFT;
spin_unlock(&DRM(mem_lock));
return handle;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
spin_unlock(&DRM(mem_lock));
return NULL;
}
int DRM(free_agp)(agp_memory *handle, int pages)
{
int alloc_count;
int free_count;
int retval = -EINVAL;
if (!handle) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
"Attempt to free NULL AGP handle\n");
return retval;
}
if (DRM(agp_free_memory)(handle)) {
spin_lock(&DRM(mem_lock));
free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
+= pages << PAGE_SHIFT;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
return 0;
}
return retval;
}
int DRM(bind_agp)(agp_memory *handle, unsigned int start)
{
int retcode = -EINVAL;
if (!handle) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
"Attempt to bind NULL AGP handle\n");
return retcode;
}
if (!(retcode = DRM(agp_bind_memory)(handle, start))) {
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
+= handle->page_count << PAGE_SHIFT;
spin_unlock(&DRM(mem_lock));
return retcode;
}
spin_lock(&DRM(mem_lock));
++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
spin_unlock(&DRM(mem_lock));
return retcode;
}
int DRM(unbind_agp)(agp_memory *handle)
{
int alloc_count;
int free_count;
int retcode = -EINVAL;
if (!handle) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
"Attempt to unbind NULL AGP handle\n");
return retcode;
}
if ((retcode = DRM(agp_unbind_memory)(handle))) return retcode;
spin_lock(&DRM(mem_lock));
free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
+= handle->page_count << PAGE_SHIFT;
spin_unlock(&DRM(mem_lock));
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
return retcode;
}
#endif
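The four wrappers above track the agpgart lifecycle: allocate, bind at an aperture page offset, unbind, free. A sketch of the expected call order (the page count, memory type 0, and bind offset are illustrative; error handling is abbreviated):
/* Sketch: carry 16 pages of AGP memory through the full
 * accounted lifecycle. Type 0 is ordinary AGP memory; the
 * bind offset is counted in aperture pages. */
agp_memory *mem = DRM(alloc_agp)(16, 0);
if (!mem)
	return -ENOMEM;
if (DRM(bind_agp)(mem, 0 /* aperture page offset */)) {
	DRM(free_agp)(mem, 16);
	return -ENOMEM;
}
/* ... the device uses the bound range ... */
DRM(unbind_agp)(mem);
DRM(free_agp)(mem, 16);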
......@@ -49,10 +49,6 @@ static int DRM(bufs_info)(char *buf, char **start, off_t offset,
static int DRM(vma_info)(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
#endif
#if __HAVE_DMA_HISTOGRAM
static int DRM(histo_info)(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
#endif
struct drm_proc_list {
const char *name;
......@@ -67,9 +63,6 @@ struct drm_proc_list {
#if DRM_DEBUG_CODE
{ "vma", DRM(vma_info) },
#endif
#if __HAVE_DMA_HISTOGRAM
{ "histo", DRM(histo_info) },
#endif
};
#define DRM_PROC_ENTRIES (sizeof(DRM(proc_list))/sizeof(DRM(proc_list)[0]))
......@@ -490,143 +483,3 @@ static int DRM(vma_info)(char *buf, char **start, off_t offset, int request,
#endif
#if __HAVE_DMA_HISTOGRAM
static int DRM(_histo_info)(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int len = 0;
drm_device_dma_t *dma = dev->dma;
int i;
unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL;
unsigned long prev_value = 0;
drm_buf_t *buffer;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("general statistics:\n");
DRM_PROC_PRINT("total %10u\n", atomic_read(&dev->histo.total));
DRM_PROC_PRINT("open %10u\n",
atomic_read(&dev->counts[_DRM_STAT_OPENS]));
DRM_PROC_PRINT("close %10u\n",
atomic_read(&dev->counts[_DRM_STAT_CLOSES]));
DRM_PROC_PRINT("ioctl %10u\n",
atomic_read(&dev->counts[_DRM_STAT_IOCTLS]));
DRM_PROC_PRINT("\nlock statistics:\n");
DRM_PROC_PRINT("locks %10u\n",
atomic_read(&dev->counts[_DRM_STAT_LOCKS]));
DRM_PROC_PRINT("unlocks %10u\n",
atomic_read(&dev->counts[_DRM_STAT_UNLOCKS]));
if (dma) {
#if 0
DRM_PROC_PRINT("\ndma statistics:\n");
DRM_PROC_PRINT("prio %10u\n",
atomic_read(&dma->total_prio));
DRM_PROC_PRINT("bytes %10u\n",
atomic_read(&dma->total_bytes));
DRM_PROC_PRINT("dmas %10u\n",
atomic_read(&dma->total_dmas));
DRM_PROC_PRINT("missed:\n");
DRM_PROC_PRINT(" dma %10u\n",
atomic_read(&dma->total_missed_dma));
DRM_PROC_PRINT(" lock %10u\n",
atomic_read(&dma->total_missed_lock));
DRM_PROC_PRINT(" free %10u\n",
atomic_read(&dma->total_missed_free));
DRM_PROC_PRINT(" sched %10u\n",
atomic_read(&dma->total_missed_sched));
DRM_PROC_PRINT("tried %10u\n",
atomic_read(&dma->total_tried));
DRM_PROC_PRINT("hit %10u\n",
atomic_read(&dma->total_hit));
DRM_PROC_PRINT("lost %10u\n",
atomic_read(&dma->total_lost));
#endif
buffer = dma->next_buffer;
if (buffer) {
DRM_PROC_PRINT("next_buffer %7d\n", buffer->idx);
} else {
DRM_PROC_PRINT("next_buffer none\n");
}
buffer = dma->this_buffer;
if (buffer) {
DRM_PROC_PRINT("this_buffer %7d\n", buffer->idx);
} else {
DRM_PROC_PRINT("this_buffer none\n");
}
}
DRM_PROC_PRINT("\nvalues:\n");
if (dev->lock.hw_lock) {
DRM_PROC_PRINT("lock 0x%08x\n",
dev->lock.hw_lock->lock);
} else {
DRM_PROC_PRINT("lock none\n");
}
DRM_PROC_PRINT("context_flag 0x%08lx\n", dev->context_flag);
DRM_PROC_PRINT("interrupt_flag 0x%08lx\n", dev->interrupt_flag);
DRM_PROC_PRINT("dma_flag 0x%08lx\n", dev->dma_flag);
DRM_PROC_PRINT("queue_count %10d\n", dev->queue_count);
DRM_PROC_PRINT("last_context %10d\n", dev->last_context);
DRM_PROC_PRINT("last_switch %10lu\n", dev->last_switch);
DRM_PROC_PRINT("last_checked %10d\n", dev->last_checked);
DRM_PROC_PRINT("\n q2d d2c c2f"
" q2c q2f dma sch"
" ctx lacq lhld\n\n");
for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) {
DRM_PROC_PRINT("%s %10lu %10u %10u %10u %10u %10u"
" %10u %10u %10u %10u %10u\n",
i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ",
i == DRM_DMA_HISTOGRAM_SLOTS - 1
? prev_value : slot_value ,
atomic_read(&dev->histo
.queued_to_dispatched[i]),
atomic_read(&dev->histo
.dispatched_to_completed[i]),
atomic_read(&dev->histo
.completed_to_freed[i]),
atomic_read(&dev->histo
.queued_to_completed[i]),
atomic_read(&dev->histo
.queued_to_freed[i]),
atomic_read(&dev->histo.dma[i]),
atomic_read(&dev->histo.schedule[i]),
atomic_read(&dev->histo.ctx[i]),
atomic_read(&dev->histo.lacq[i]),
atomic_read(&dev->histo.lhld[i]));
prev_value = slot_value;
slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value);
}
if (len > request + offset) return request;
*eof = 1;
return len - offset;
}
static int DRM(histo_info)(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
drm_device_t *dev = (drm_device_t *)data;
int ret;
down(&dev->struct_sem);
ret = DRM(_histo_info)(buf, start, offset, request, eof, data);
up(&dev->struct_sem);
return ret;
}
#endif
......@@ -71,6 +71,9 @@
#define __HAVE_OLD_DMA 1
#define __HAVE_PCI_DMA 1
#define __HAVE_DRIVER_FOPS_READ 1
#define __HAVE_DRIVER_FOPS_POLL 1
#define __HAVE_MULTIPLE_DMA_QUEUES 1
#define __HAVE_DMA_WAITQUEUE 1
......@@ -87,62 +90,19 @@
#define __HAVE_DMA_QUIESCENT 1
#define DRIVER_DMA_QUIESCENT() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private; \
if (dev_priv->num_rast == 2) \
gamma_dma_quiescent_dual(dev); \
else gamma_dma_quiescent_single(dev); \
return 0; \
} while (0)
#define __HAVE_DMA_IRQ 1
#define __HAVE_DMA_IRQ_BH 1
#if 1
#define DRIVER_PREINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 ); \
GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 ); \
} while (0)
#define DRIVER_POSTINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3); \
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 ); \
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 ); \
GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 ); \
} while (0)
#else
#define DRIVER_POSTINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002000 ); \
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000004 ); \
} while (0)
#define DRIVER_PREINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
GAMMA_WRITE( GAMMA_GCOMMANDMODE, GAMMA_QUEUED_DMA_MODE );\
GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );\
} while (0)
#endif
#define DRIVER_UNINSTALL() do { \
drm_gamma_private_t *dev_priv = \
(drm_gamma_private_t *)dev->dev_private;\
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2); \
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3); \
GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 ); \
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 ); \
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 ); \
} while (0)
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
((drm_gamma_private_t *)((dev)->dev_private))->buffers
#endif /* __GAMMA_H__ */
......@@ -139,15 +139,9 @@ static int gamma_do_dma(drm_device_t *dev, int locked)
drm_buf_t *buf;
int retcode = 0;
drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
cycles_t dma_start, dma_stop;
#endif
if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;
#if DRM_DMA_HISTOGRAM
dma_start = get_cycles();
#endif
if (!dma->next_buffer) {
DRM_ERROR("No next_buffer\n");
......@@ -221,9 +215,6 @@ static int gamma_do_dma(drm_device_t *dev, int locked)
buf->pending = 1;
buf->waiting = 0;
buf->list = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
buf->time_dispatched = get_cycles();
#endif
/* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
address = buf->idx << 12;
......@@ -245,10 +236,6 @@ static int gamma_do_dma(drm_device_t *dev, int locked)
clear_bit(0, &dev->dma_flag);
#if DRM_DMA_HISTOGRAM
dma_stop = get_cycles();
atomic_inc(&dev->histo.dma[gamma_histogram_slot(dma_stop - dma_start)]);
#endif
return retcode;
}
......@@ -273,9 +260,6 @@ int gamma_dma_schedule(drm_device_t *dev, int locked)
int missed;
int expire = 20;
drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
cycles_t schedule_start;
#endif
if (test_and_set_bit(0, &dev->interrupt_flag)) {
/* Not reentrant */
......@@ -284,9 +268,6 @@ int gamma_dma_schedule(drm_device_t *dev, int locked)
}
missed = atomic_read(&dev->counts[10]);
#if DRM_DMA_HISTOGRAM
schedule_start = get_cycles();
#endif
again:
if (dev->context_flag) {
......@@ -333,10 +314,6 @@ int gamma_dma_schedule(drm_device_t *dev, int locked)
clear_bit(0, &dev->interrupt_flag);
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.schedule[gamma_histogram_slot(get_cycles()
- schedule_start)]);
#endif
return retcode;
}
......@@ -448,10 +425,6 @@ static int gamma_dma_priority(struct file *filp,
}
}
#if DRM_DMA_HISTOGRAM
buf->time_queued = get_cycles();
buf->time_dispatched = buf->time_queued;
#endif
gamma_dma_dispatch(dev, address, length);
atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
......@@ -604,6 +577,8 @@ static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
memset( dev_priv, 0, sizeof(drm_gamma_private_t) );
dev_priv->num_rast = init->num_rast;
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
if( r_list->map &&
......@@ -667,6 +642,7 @@ int gamma_do_cleanup_dma( drm_device_t *dev )
if ( dev->dev_private ) {
drm_gamma_private_t *dev_priv = dev->dev_private;
if ( dev_priv->buffers != NULL )
DRM_IOREMAPFREE( dev_priv->buffers );
DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
......@@ -831,3 +807,35 @@ int gamma_setsareactx(struct inode *inode, struct file *filp,
up(&dev->struct_sem);
return 0;
}
void DRM(driver_irq_preinstall)( drm_device_t *dev ) {
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2);
GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 );
GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );
}
void DRM(driver_irq_postinstall)( drm_device_t *dev ) {
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3);
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 );
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 );
GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 );
}
void DRM(driver_irq_uninstall)( drm_device_t *dev ) {
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3);
GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 );
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 );
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 );
}
......@@ -84,6 +84,7 @@ typedef struct drm_gamma_init {
unsigned int mmio2;
unsigned int mmio3;
unsigned int buffers_offset;
int num_rast;
} drm_gamma_init_t;
#endif /* _GAMMA_DRM_H_ */
......@@ -39,16 +39,18 @@
#include "drm_auth.h"
#include "drm_agpsupport.h"
#include "drm_bufs.h"
#include "drm_context.h"
#include "gamma_context.h" /* NOTE! */
#include "drm_dma.h"
#include "gamma_old_dma.h" /* NOTE */
#include "drm_drawable.h"
#include "drm_drv.h"
#include "drm_fops.h"
#include "drm_init.h"
#include "drm_ioctl.h"
#include "drm_lists.h"
#include "gamma_lists.h" /* NOTE */
#include "drm_lock.h"
#include "gamma_lock.h" /* NOTE */
#include "drm_memory.h"
#include "drm_proc.h"
#include "drm_vm.h"
......
......@@ -40,6 +40,7 @@ typedef struct drm_gamma_private {
drm_map_t *mmio1;
drm_map_t *mmio2;
drm_map_t *mmio3;
int num_rast;
} drm_gamma_private_t;
/* gamma_dma.c */
......@@ -59,6 +60,29 @@ extern int gamma_dma(struct inode *inode, struct file *filp,
extern int gamma_find_devices(void);
extern int gamma_found(void);
/* Gamma-specific code pulled from drm_dma.h:
*/
extern void DRM(clear_next_buffer)(drm_device_t *dev);
extern int DRM(select_queue)(drm_device_t *dev,
void (*wrapper)(unsigned long));
extern int DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma);
extern int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma);
/* Gamma-specific code pulled from drm_lists.h (now renamed gamma_lists.h):
*/
extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count);
extern int DRM(waitlist_destroy)(drm_waitlist_t *bl);
extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl);
extern int DRM(freelist_create)(drm_freelist_t *bl, int count);
extern int DRM(freelist_destroy)(drm_freelist_t *bl);
extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
drm_buf_t *buf);
extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
#define GLINT_DRI_BUF_COUNT 256
#define GAMMA_OFF(reg) \
......
......@@ -93,7 +93,7 @@
*/
#define __HAVE_DMA 1
#define __HAVE_DMA_QUEUE 1
#define __HAVE_DMA_WAITLIST 0
#define __HAVE_DMA_RECLAIM 1
#define __HAVE_DMA_QUIESCENT 1
......
......@@ -115,9 +115,7 @@ static struct file_operations i810_buffer_fops = {
.release = DRM(release),
.ioctl = DRM(ioctl),
.mmap = i810_mmap_buffers,
.read = DRM(read),
.fasync = DRM(fasync),
.poll = DRM(poll),
};
int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
......@@ -256,6 +254,7 @@ static int i810_dma_cleanup(drm_device_t *dev)
for (i = 0; i < dma->buf_count; i++) {
drm_buf_t *buf = dma->buflist[ i ];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
if ( buf_priv->kernel_virtual && buf->total )
DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
}
}
......
......@@ -49,7 +49,6 @@
#include "drm_init.h"
#include "drm_ioctl.h"
#include "drm_lock.h"
#include "drm_lists.h"
#include "drm_memory.h"
#include "drm_proc.h"
#include "drm_vm.h"
......
......@@ -94,7 +94,7 @@
*/
#define __HAVE_DMA 1
#define __HAVE_DMA_QUEUE 1
#define __HAVE_DMA_WAITLIST 0
#define __HAVE_DMA_RECLAIM 1
#define __HAVE_DMA_QUIESCENT 1
......
......@@ -115,9 +115,7 @@ static struct file_operations i830_buffer_fops = {
.release = DRM(release),
.ioctl = DRM(ioctl),
.mmap = i830_mmap_buffers,
.read = DRM(read),
.fasync = DRM(fasync),
.poll = DRM(poll),
};
int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
......@@ -265,6 +263,7 @@ static int i830_dma_cleanup(drm_device_t *dev)
for (i = 0; i < dma->buf_count; i++) {
drm_buf_t *buf = dma->buflist[ i ];
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
if ( buf_priv->kernel_virtual && buf->total )
DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
}
}
......
......@@ -51,7 +51,6 @@
#include "drm_init.h"
#include "drm_ioctl.h"
#include "drm_lock.h"
#include "drm_lists.h"
#include "drm_memory.h"
#include "drm_proc.h"
#include "drm_vm.h"
......
......@@ -642,8 +642,11 @@ int mga_do_cleanup_dma( drm_device_t *dev )
if ( dev->dev_private ) {
drm_mga_private_t *dev_priv = dev->dev_private;
if ( dev_priv->warp != NULL )
DRM_IOREMAPFREE( dev_priv->warp );
if ( dev_priv->primary != NULL )
DRM_IOREMAPFREE( dev_priv->primary );
if ( dev_priv->buffers != NULL )
DRM_IOREMAPFREE( dev_priv->buffers );
if ( dev_priv->head != NULL ) {
......
......@@ -619,8 +619,11 @@ int r128_do_cleanup_cce( drm_device_t *dev )
#if __REALLY_HAVE_SG
if ( !dev_priv->is_pci ) {
#endif
if ( dev_priv->cce_ring != NULL )
DRM_IOREMAPFREE( dev_priv->cce_ring );
if ( dev_priv->ring_rptr != NULL )
DRM_IOREMAPFREE( dev_priv->ring_rptr );
if ( dev_priv->buffers != NULL )
DRM_IOREMAPFREE( dev_priv->buffers );
#if __REALLY_HAVE_SG
} else {
......
......@@ -78,6 +78,7 @@
* Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
* R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian)
* 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
* Add 'GET' queries for starting additional clients on different VT's.
*/
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \
......
......@@ -36,12 +36,6 @@
#define RADEON_FIFO_DEBUG 0
#if defined(__alpha__) || defined(__powerpc__)
# define PCIGART_ENABLED
#else
# undef PCIGART_ENABLED
#endif
/* CP microcode (from ATI) */
static u32 R200_cp_microcode[][2] = {
......@@ -777,7 +771,7 @@ static void radeon_do_cp_reset( drm_radeon_private_t *dev_priv )
cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR );
RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr );
SET_RING_HEAD( dev_priv, cur_read_ptr );
dev_priv->ring.tail = cur_read_ptr;
}
......@@ -889,13 +883,18 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
/* Initialize the ring buffer's read and write pointers */
cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR );
RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr );
SET_RING_HEAD( dev_priv, cur_read_ptr );
dev_priv->ring.tail = cur_read_ptr;
#if __REALLY_HAVE_AGP
if ( !dev_priv->is_pci ) {
RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
	      dev_priv->ring_rptr->offset
	      - dev->agp->base
	      + dev_priv->agp_vm_start );
} else
#endif
{
drm_sg_mem_t *entry = dev->sg;
unsigned long tmp_ofs, page_ofs;
......@@ -920,7 +919,7 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
+ RADEON_SCRATCH_REG_OFFSET );
dev_priv->scratch = ((__volatile__ u32 *)
dev_priv->ring_rptr->handle +
(RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));
RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 );
......@@ -990,17 +989,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
dev_priv->is_pci = init->is_pci;
#if !defined(PCIGART_ENABLED)
/* PCI support is not 100% working, so we disable it here.
*/
if ( dev_priv->is_pci ) {
DRM_ERROR( "PCI GART not yet supported for Radeon!\n" );
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
#endif
if ( dev_priv->is_pci && !dev->sg ) {
DRM_ERROR( "PCI GART memory not allocated!\n" );
dev->dev_private = (void *)dev_priv;
......@@ -1098,6 +1086,13 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
DRM_GETSAREA();
dev_priv->fb_offset = init->fb_offset;
dev_priv->mmio_offset = init->mmio_offset;
dev_priv->ring_offset = init->ring_offset;
dev_priv->ring_rptr_offset = init->ring_rptr_offset;
dev_priv->buffers_offset = init->buffers_offset;
dev_priv->agp_textures_offset = init->agp_textures_offset;
if(!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
......@@ -1204,9 +1199,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
DRM_DEBUG( "dev_priv->agp_buffers_offset 0x%lx\n",
dev_priv->agp_buffers_offset );
dev_priv->ring.head = ((__volatile__ u32 *)
dev_priv->ring_rptr->handle);
dev_priv->ring.start = (u32 *)dev_priv->cp_ring->handle;
dev_priv->ring.end = ((u32 *)dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32));
......@@ -1217,7 +1209,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
(dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
dev_priv->ring.ring_rptr = dev_priv->ring_rptr;
#if __REALLY_HAVE_SG
if ( dev_priv->is_pci ) {
......@@ -1279,8 +1270,11 @@ int radeon_do_cleanup_cp( drm_device_t *dev )
drm_radeon_private_t *dev_priv = dev->dev_private;
if ( !dev_priv->is_pci ) {
if ( dev_priv->cp_ring != NULL )
DRM_IOREMAPFREE( dev_priv->cp_ring );
if ( dev_priv->ring_rptr != NULL )
DRM_IOREMAPFREE( dev_priv->ring_rptr );
if ( dev_priv->buffers != NULL )
DRM_IOREMAPFREE( dev_priv->buffers );
} else {
#if __REALLY_HAVE_SG
......@@ -1592,10 +1586,10 @@ int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n )
{
drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
int i;
u32 last_head = GET_RING_HEAD( dev_priv );
for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
u32 head = GET_RING_HEAD( dev_priv );
ring->space = (head - ring->tail) * sizeof(u32);
if ( ring->space <= 0 )
......
......@@ -532,6 +532,10 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_LAST_CLEAR 4
#define RADEON_PARAM_IRQ_NR 5
#define RADEON_PARAM_AGP_BASE 6 /* card offset of agp base */
#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
#define RADEON_PARAM_STATUS_HANDLE 8
#define RADEON_PARAM_SAREA_HANDLE 9
#define RADEON_PARAM_AGP_TEX_HANDLE 10
typedef struct drm_radeon_getparam {
int param;
......
......@@ -31,8 +31,8 @@
#ifndef __RADEON_DRV_H__
#define __RADEON_DRV_H__
#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
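With the head pointer now read through the ring_rptr map instead of a cached kernel pointer, ring-space math calls GET_RING_HEAD on every check. A sketch of the free-space computation, matching the arithmetic in radeon_wait_ring() above; the wrap-around add is an assumption filling in the branch the diff truncates:
/* Sketch: recompute free ring space from the memory-mapped
 * read pointer. When head has wrapped behind tail the byte
 * count goes non-positive, so the ring size is added back
 * (assumed wrap handling; the diff cuts off that branch). */
u32 head = GET_RING_HEAD( dev_priv );
dev_priv->ring.space = (head - dev_priv->ring.tail) * sizeof(u32);
if ( dev_priv->ring.space <= 0 )
	dev_priv->ring.space += dev_priv->ring.size;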
typedef struct drm_radeon_freelist {
unsigned int age;
......@@ -47,13 +47,11 @@ typedef struct drm_radeon_ring_buffer {
int size;
int size_l2qw;
volatile u32 *head;
u32 tail;
u32 tail_mask;
int space;
int high_mark;
drm_local_map_t *ring_rptr;
} drm_radeon_ring_buffer_t;
typedef struct drm_radeon_depth_clear_t {
......@@ -127,6 +125,13 @@ typedef struct drm_radeon_private {
drm_radeon_depth_clear_t depth_clear;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long ring_rptr_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
drm_local_map_t *sarea;
drm_local_map_t *fb;
drm_local_map_t *mmio;
......@@ -775,7 +780,7 @@ extern int RADEON_READ_PLL( drm_device_t *dev, int addr );
#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
do { \
if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) { \
u32 head = GET_RING_HEAD( dev_priv ); \
if (head == dev_priv->ring.tail) \
dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE; \
} \
......@@ -847,8 +852,8 @@ do { \
#define COMMIT_RING() do { \
/* Flush writes to ring */ \
DRM_READMEMORYBARRIER( dev_priv->mmio ); \
GET_RING_HEAD( dev_priv ); \
RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \
/* read from PCI bus to ensure correct posting */ \
RADEON_READ( RADEON_CP_RB_RPTR ); \
......
......@@ -2191,6 +2191,19 @@ int radeon_cp_getparam( DRM_IOCTL_ARGS )
case RADEON_PARAM_AGP_BASE:
value = dev_priv->agp_vm_start;
break;
case RADEON_PARAM_REGISTER_HANDLE:
value = dev_priv->mmio_offset;
break;
case RADEON_PARAM_STATUS_HANDLE:
value = dev_priv->ring_rptr_offset;
break;
case RADEON_PARAM_SAREA_HANDLE:
/* The lock is the first dword in the sarea. */
value = (int)dev->lock.hw_lock;
break;
case RADEON_PARAM_AGP_TEX_HANDLE:
value = dev_priv->agp_textures_offset;
break;
default:
return DRM_ERR(EINVAL);
}
......
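From userspace, the new handles are meant to be queried before mapping, so a second client started on another VT can find the same maps. A hedged client-side sketch (struct layout and ioctl wrapper assumed from the libdrm conventions of the time; the value field is taken to be a pointer-sized out-parameter, and fd is an already-open DRM file descriptor):
/* Hypothetical client-side query: fetch the status-page
 * (ring read pointer) map handle so it can be drmMap()ed. */
drm_radeon_getparam_t gp;
int status_handle = 0;

gp.param = RADEON_PARAM_STATUS_HANDLE;
gp.value = &status_handle;		/* assumed out-pointer */
if (ioctl(fd, DRM_IOCTL_RADEON_GETPARAM, &gp) < 0)
	status_handle = 0;	/* older kernel: param unknown */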
......@@ -41,7 +41,6 @@
#include "drm_fops.h"
#include "drm_init.h"
#include "drm_ioctl.h"
#include "drm_lists.h"
#include "drm_lock.h"
#include "drm_memory.h"
#include "drm_proc.h"
......