Commit d9fd3c1c authored by Linus Torvalds

Merge bk://cifs.bkbits.net/linux-2.5cifs

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 44ce64b1 b596fd12
@@ -210,6 +210,11 @@ config FORCE_MAX_ZONEORDER
 	depends on SA1111
 	default "9"

+config DMABOUNCE
+	bool
+	depends on SA1111
+	default y
+
 source arch/arm/mm/Kconfig

 # bool 'Use XScale PMU as timer source' CONFIG_XSCALE_PMU_TIMER
...
@@ -5,6 +5,7 @@
 obj-y			+= platform.o
 obj-$(CONFIG_ARM_AMBA)	+= amba.o
 obj-$(CONFIG_ICST525)	+= icst525.o
-obj-$(CONFIG_SA1111)	+= sa1111.o sa1111-pcibuf.o
+obj-$(CONFIG_SA1111)	+= sa1111.o
 obj-$(CONFIG_PCI_HOST_PLX90X0)	+= plx90x0.o
 obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
+obj-$(CONFIG_DMABOUNCE)	+= dmabounce.o
 /*
- *  linux/arch/arm/mach-sa1100/sa1111-pcibuf.c
+ *  arch/arm/common/dmabounce.c
  *
- *  Special dma_{map/unmap/dma_sync}_* routines for SA-1111.
+ *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
+ *  limited DMA windows. These functions utilize bounce buffers to
+ *  copy data to/from buffers located outside the DMA region. This
+ *  only works for systems in which DMA memory is at the bottom of
+ *  RAM, the remainder of memory is at the top, and the DMA memory
+ *  can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
+ *  DMA windows, will require custom implementations that reserve memory
+ *  areas at early bootup.
  *
- *  These functions utilize bounce buffers to compensate for a bug in
- *  the SA-1111 hardware which doesn't allow DMA to/from certain
- *  addresses above 1MB.
- *
- *  Re-written by Christopher Hoover <ch@murgatroid.com>
  *  Original version by Brad Parker (brad@heeltoe.com)
+ *  Re-written by Christopher Hoover <ch@murgatroid.com>
+ *  Made generic by Deepak Saxena <dsaxena@plexity.net>
  *
  *  Copyright (C) 2002 Hewlett Packard Company.
+ *  Copyright (C) 2004 MontaVista Software, Inc.
  *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
  *  version 2 as published by the Free Software Foundation.
  */

-//#define DEBUG
-
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/list.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
+#include <linux/list.h>

-#include <asm/hardware/sa1111.h>
-
-//#define STATS
+#undef DEBUG
+
+#undef STATS
 #ifdef STATS
 #define DO_STATS(X) do { X ; } while (0)
 #else
@@ -43,116 +47,101 @@ struct safe_buffer {
 	/* original request */
 	void		*ptr;
 	size_t		size;
-	enum dma_data_direction direction;
+	int		direction;

 	/* safe buffer info */
 	struct dma_pool *pool;
 	void		*safe;
 	dma_addr_t	safe_dma_addr;
-	struct device	*dev;
 };

-static LIST_HEAD(safe_buffers);
-
-#define SIZE_SMALL	1024
-#define SIZE_LARGE	(4*1024)
-
-static struct dma_pool *small_buffer_pool, *large_buffer_pool;
+struct dmabounce_device_info {
+	struct list_head node;
+
+	struct device *dev;
+	struct dma_pool *small_buffer_pool;
+	struct dma_pool *large_buffer_pool;
+	struct list_head safe_buffers;
+	unsigned long small_buffer_size, large_buffer_size;
+#ifdef STATS
+	unsigned long sbp_allocs;
+	unsigned long lbp_allocs;
+	unsigned long total_allocs;
+	unsigned long map_op_count;
+	unsigned long bounce_count;
+#endif
+};
+
+static LIST_HEAD(dmabounce_devs);

 #ifdef STATS
-static unsigned long sbp_allocs __initdata = 0;
-static unsigned long lbp_allocs __initdata = 0;
-static unsigned long total_allocs __initdata= 0;
-
-static void print_alloc_stats(void)
+static void print_alloc_stats(struct dmabounce_device_info *device_info)
 {
 	printk(KERN_INFO
-	       "sa1111_dmabuf: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
-	       sbp_allocs, lbp_allocs,
-	       total_allocs - sbp_allocs - lbp_allocs, total_allocs);
+	       "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
+	       device_info->dev->bus_id,
+	       device_info->sbp_allocs, device_info->lbp_allocs,
+	       device_info->total_allocs - device_info->sbp_allocs -
+			device_info->lbp_allocs,
+	       device_info->total_allocs);
 }
 #endif
-static int __init create_safe_buffer_pools(void)
+/* find the given device in the dmabounce device list */
+static inline struct dmabounce_device_info *
+find_dmabounce_dev(struct device *dev)
 {
-	small_buffer_pool = dma_pool_create("sa1111_small_dma_buffer",
-					    NULL, SIZE_SMALL,
-					    0 /* byte alignment */,
-					    0 /* no page-crossing issues */);
-	if (small_buffer_pool == NULL) {
-		printk(KERN_ERR
-		       "sa1111_dmabuf: could not allocate small pci pool\n");
-		return -ENOMEM;
-	}
-
-	large_buffer_pool = dma_pool_create("sa1111_large_dma_buffer",
-					    NULL, SIZE_LARGE,
-					    0 /* byte alignment */,
-					    0 /* no page-crossing issues */);
-	if (large_buffer_pool == NULL) {
-		printk(KERN_ERR
-		       "sa1111_dmabuf: could not allocate large pci pool\n");
-		dma_pool_destroy(small_buffer_pool);
-		small_buffer_pool = NULL;
-		return -ENOMEM;
-	}
-
-	printk(KERN_INFO "SA1111: DMA buffer sizes: small=%u, large=%u\n",
-	       SIZE_SMALL, SIZE_LARGE);
-
-	return 0;
-}
-
-static void __exit destroy_safe_buffer_pools(void)
-{
-	if (small_buffer_pool)
-		dma_pool_destroy(small_buffer_pool);
-	if (large_buffer_pool)
-		dma_pool_destroy(large_buffer_pool);
-
-	small_buffer_pool = large_buffer_pool = NULL;
+	struct list_head *entry;
+
+	list_for_each(entry, &dmabounce_devs) {
+		struct dmabounce_device_info *d =
+			list_entry(entry, struct dmabounce_device_info, node);
+
+		if (d->dev == dev)
+			return d;
+	}
+	return NULL;
 }
 /* allocate a 'safe' buffer and keep track of it */
-static struct safe_buffer *alloc_safe_buffer(struct device *dev, void *ptr,
-					     size_t size,
-					     enum dma_data_direction dir)
+static inline struct safe_buffer *
+alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
+		  size_t size, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
 	struct dma_pool *pool;
+	struct device *dev = device_info->dev;
 	void *safe;
 	dma_addr_t safe_dma_addr;

-	dev_dbg(dev, "%s(ptr=%p, size=%d, direction=%d)\n",
+	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
 		__func__, ptr, size, dir);

-	DO_STATS ( total_allocs++ );
+	DO_STATS ( device_info->total_allocs++ );

 	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
-	if (buf == NULL) {
-		printk(KERN_WARNING "%s: kmalloc failed\n", __func__);
+	if (buf == 0) {
+		dev_warn(dev, "%s: kmalloc failed\n", __func__);
 		return 0;
 	}

-	if (size <= SIZE_SMALL) {
-		pool = small_buffer_pool;
+	if (size <= device_info->small_buffer_size) {
+		pool = device_info->small_buffer_pool;
 		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);

-		DO_STATS ( sbp_allocs++ );
-	} else if (size <= SIZE_LARGE) {
-		pool = large_buffer_pool;
+		DO_STATS ( device_info->sbp_allocs++ );
+	} else if (size <= device_info->large_buffer_size) {
+		pool = device_info->large_buffer_pool;
 		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);

-		DO_STATS ( lbp_allocs++ );
+		DO_STATS ( device_info->lbp_allocs++ );
 	} else {
-		pool = NULL;
+		pool = 0;
 		safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC);
 	}

-	if (safe == NULL) {
-		printk(KERN_WARNING
-		       "%s: could not alloc dma memory (size=%d)\n",
-		       __func__, size);
+	if (safe == 0) {
+		dev_warn(device_info->dev,
+			 "%s: could not alloc dma memory (size=%d)\n",
+			 __func__, size);
 		kfree(buf);
@@ -160,191 +149,213 @@ static struct safe_buffer *alloc_safe_buffer(struct device *dev, void *ptr,
 	}

 #ifdef STATS
-	if (total_allocs % 1000 == 0)
-		print_alloc_stats();
+	if (device_info->total_allocs % 1000 == 0)
+		print_alloc_stats(device_info);
 #endif

-	BUG_ON(sa1111_check_dma_bug(safe_dma_addr));	// paranoia
-
 	buf->ptr = ptr;
 	buf->size = size;
 	buf->direction = dir;
 	buf->pool = pool;
 	buf->safe = safe;
 	buf->safe_dma_addr = safe_dma_addr;
-	buf->dev = dev;

-	list_add(&buf->node, &safe_buffers);
+	list_add(&buf->node, &device_info->safe_buffers);

 	return buf;
 }
 /* determine if a buffer is from our "safe" pool */
-static struct safe_buffer *find_safe_buffer(struct device *dev,
-					    dma_addr_t safe_dma_addr)
+static inline struct safe_buffer *
+find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
 {
 	struct list_head *entry;

-	list_for_each(entry, &safe_buffers) {
+	list_for_each(entry, &device_info->safe_buffers) {
 		struct safe_buffer *b =
 			list_entry(entry, struct safe_buffer, node);

-		if (b->safe_dma_addr == safe_dma_addr &&
-		    b->dev == dev) {
+		if (b->safe_dma_addr == safe_dma_addr)
 			return b;
-		}
 	}

-	return 0;
+	return NULL;
 }

-static void free_safe_buffer(struct safe_buffer *buf)
+static inline void
+free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
 {
-	pr_debug("%s(buf=%p)\n", __func__, buf);
+	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

 	list_del(&buf->node);

 	if (buf->pool)
 		dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
 	else
-		dma_free_coherent(buf->dev, buf->size, buf->safe,
+		dma_free_coherent(device_info->dev, buf->size, buf->safe,
 				  buf->safe_dma_addr);

 	kfree(buf);
 }
-
-static inline int dma_range_is_safe(struct device *dev, dma_addr_t addr,
-				    size_t size)
-{
-	unsigned int physaddr = SA1111_DMA_ADDR((unsigned int) addr);
-
-	/* Any address within one megabyte of the start of the target
-	 * bank will be OK.  This is an overly conservative test:
-	 * other addresses can be OK depending on the dram
-	 * configuration.  (See sa1111.c:sa1111_check_dma_bug() for
-	 * details.)
-	 *
-	 * We take care to ensure the entire dma region is within
-	 * the safe range.
-	 */
-	return ((physaddr + size - 1) < (1<<20));
-}
 /* ************************************************** */

 #ifdef STATS
-static unsigned long map_op_count __initdata = 0;
-static unsigned long bounce_count __initdata = 0;
-
-static void print_map_stats(void)
+static void print_map_stats(struct dmabounce_device_info *device_info)
 {
 	printk(KERN_INFO
-	       "sa1111_dmabuf: map_op_count=%lu, bounce_count=%lu\n",
-	       map_op_count, bounce_count);
+	       "%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n",
+	       device_info->dev->bus_id,
+	       device_info->map_op_count, device_info->bounce_count);
 }
 #endif
-static dma_addr_t map_single(struct device *dev, void *ptr,
-			     size_t size, enum dma_data_direction dir)
+static inline dma_addr_t
+map_single(struct device *dev, void *ptr, size_t size,
+	   enum dma_data_direction dir)
 {
 	dma_addr_t dma_addr;
+	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);

-	DO_STATS ( map_op_count++ );
+	if (device_info)
+		DO_STATS ( device_info->map_op_count++ );
+
+	if (dev->dma_mask) {
+		unsigned long limit;
+
+		limit = (*dev->dma_mask + 1) & ~(*dev->dma_mask);
+		if (limit && (size > limit)) {
+			dev_err(dev, "DMA mapping too big "
+				"(requested %#x mask %#Lx)\n",
+				size, *dev->dma_mask);
+			return ~0;
+		}
+	}

 	dma_addr = virt_to_bus(ptr);

-	if (!dma_range_is_safe(dev, dma_addr, size)) {
+	if (device_info && dma_needs_bounce(dev, dma_addr, size)) {
 		struct safe_buffer *buf;

-		DO_STATS ( bounce_count++ ) ;
-
-		buf = alloc_safe_buffer(dev, ptr, size, dir);
-		if (buf == NULL) {
-			printk(KERN_ERR
-			       "%s: unable to map unsafe buffer %p!\n",
+		buf = alloc_safe_buffer(device_info, ptr, size, dir);
+		if (buf == 0) {
+			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
 			       __func__, ptr);
 			return 0;
 		}

-		dev_dbg(dev, "%s: unsafe buffer %p (phy=%08lx) mapped to %p (phy=%08x)\n",
-			__func__,
-			buf->ptr, virt_to_bus(buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+		dev_dbg(dev,
+			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
+			__func__, buf->ptr, (void *) virt_to_bus(buf->ptr),
+			buf->safe, (void *) buf->safe_dma_addr);

-		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
-			dev_dbg(dev, "%s: copy out from unsafe %p, to safe %p, size %d\n",
+		if ((dir == DMA_TO_DEVICE) ||
+		    (dir == DMA_BIDIRECTIONAL)) {
+			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
 				__func__, ptr, buf->safe, size);
 			memcpy(buf->safe, ptr, size);
 		}

+		consistent_sync(buf->safe, size, dir);
+
 		dma_addr = buf->safe_dma_addr;
-		ptr = buf->safe;
+	} else {
+		consistent_sync(ptr, size, dir);
 	}

-	consistent_sync(ptr, size, dir);
-
-#ifdef STATS
-	if (map_op_count % 1000 == 0)
-		print_map_stats();
-#endif
-
 	return dma_addr;
 }
-static void unmap_single(struct device *dev, dma_addr_t dma_addr,
-			 size_t size, enum dma_data_direction dir)
+static inline void
+unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+	     enum dma_data_direction dir)
 {
-	struct safe_buffer *buf;
+	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+	struct safe_buffer *buf = NULL;
+
+	/*
+	 * Trying to unmap an invalid mapping
+	 */
+	if (dma_addr == ~0) {
+		dev_err(dev, "Trying to unmap invalid mapping\n");
+		return;
+	}

-	buf = find_safe_buffer(dev, dma_addr);
+	if (device_info)
+		buf = find_safe_buffer(device_info, dma_addr);

 	if (buf) {
 		BUG_ON(buf->size != size);
-		BUG_ON(buf->direction != dir);

-		dev_dbg(dev, "%s: unsafe buffer %p (phy=%08lx) mapped to %p (phy=%08lx)\n",
-			__func__,
-			buf->ptr, virt_to_bus(buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+		dev_dbg(dev,
+			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
+			__func__, buf->ptr, (void *) virt_to_bus(buf->ptr),
+			buf->safe, (void *) buf->safe_dma_addr);

-		DO_STATS ( bounce_count++ );
+		DO_STATS ( device_info->bounce_count++ );

-		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-			dev_dbg(dev, "%s: copy back from safe %p, to unsafe %p size %d\n",
+		if ((dir == DMA_FROM_DEVICE) ||
+		    (dir == DMA_BIDIRECTIONAL)) {
+			dev_dbg(dev,
+				"%s: copy back safe %p to unsafe %p size %d\n",
 				__func__, buf->safe, buf->ptr, size);
 			memcpy(buf->ptr, buf->safe, size);
 		}
-		free_safe_buffer(buf);
+		free_safe_buffer(device_info, buf);
 	}
 }
-static void sync_single(struct device *dev, dma_addr_t dma_addr,
-			size_t size, enum dma_data_direction dir)
+static inline void
+sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+	    enum dma_data_direction dir)
 {
-	struct safe_buffer *buf;
-	void *ptr;
+	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+	struct safe_buffer *buf = NULL;

-	buf = find_safe_buffer(dev, dma_addr);
+	if (device_info)
+		buf = find_safe_buffer(device_info, dma_addr);

 	if (buf) {
-		BUG_ON(buf->size != size);
-		BUG_ON(buf->direction != dir);
+		/*
+		 * Both of these checks from original code need to be
+		 * commented out b/c some drivers rely on the following:
+		 *
+		 * 1) Drivers may map a large chunk of memory into DMA space
+		 *    but only sync a small portion of it. Good example is
+		 *    allocating a large buffer, mapping it, and then
+		 *    breaking it up into small descriptors. No point
+		 *    in syncing the whole buffer if you only have to
+		 *    touch one descriptor.
+		 *
+		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
+		 *    usually only synced in one dir at a time.
+		 *
+		 * See drivers/net/eepro100.c for examples of both cases.
+		 *
+		 * -ds
+		 *
+		 * BUG_ON(buf->size != size);
+		 * BUG_ON(buf->direction != dir);
+		 */

-		dev_dbg(dev, "%s: unsafe buffer %p (phy=%08lx) mapped to %p (phy=%08lx)\n",
-			__func__,
-			buf->ptr, virt_to_bus(buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+		dev_dbg(dev,
+			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
+			__func__, buf->ptr, (void *) virt_to_bus(buf->ptr),
+			buf->safe, (void *) buf->safe_dma_addr);

-		DO_STATS ( bounce_count++ );
+		DO_STATS ( device_info->bounce_count++ );

 		switch (dir) {
 		case DMA_FROM_DEVICE:
-			dev_dbg(dev, "%s: copy back from safe %p, to unsafe %p size %d\n",
+			dev_dbg(dev,
+				"%s: copy back safe %p to unsafe %p size %d\n",
 				__func__, buf->safe, buf->ptr, size);
 			memcpy(buf->ptr, buf->safe, size);
 			break;
 		case DMA_TO_DEVICE:
-			dev_dbg(dev, "%s: copy out from unsafe %p, to safe %p, size %d\n",
+			dev_dbg(dev,
+				"%s: copy out unsafe %p to safe %p, size %d\n",
 				__func__, buf->ptr, buf->safe, size);
 			memcpy(buf->safe, buf->ptr, size);
 			break;
@@ -353,11 +364,10 @@ static void sync_single(struct device *dev, dma_addr_t dma_addr,
 		default:
 			BUG();
 		}
-		ptr = buf->safe;
+		consistent_sync(buf->safe, size, dir);
 	} else {
-		ptr = bus_to_virt(dma_addr);
+		consistent_sync(bus_to_virt(dma_addr), size, dir);
 	}
-
-	consistent_sync(ptr, size, dir);
 }
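To make the rationale above concrete, here is a minimal sketch of the descriptor pattern it describes (all names and sizes are hypothetical and not taken from this patch; only the mapping/sync calls are the ones defined in this file):

	/* Hypothetical driver: map one large ring, then sync pieces of it. */
	#define RING_BYTES	4096
	#define DESC_BYTES	64

	dma_addr_t ring_dma = dma_map_single(dev, ring, RING_BYTES,
					     DMA_BIDIRECTIONAL);

	/* Device filled in the first descriptor; claim just those bytes.
	 * The size differs from the mapped size and the direction from the
	 * mapped direction -- exactly the two cases the BUG_ON()s rejected. */
	dma_sync_single_for_cpu(dev, ring_dma, DESC_BYTES, DMA_FROM_DEVICE);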
 /* ************************************************** */

@@ -368,8 +378,9 @@ static void sync_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t sa1111_map_single(struct device *dev, void *ptr,
-			     size_t size, enum dma_data_direction dir)
+dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+	       enum dma_data_direction dir)
 {
 	unsigned long flags;
 	dma_addr_t dma_addr;
@@ -394,13 +405,17 @@ dma_addr_t sa1111_map_single(struct device *dev, void *ptr,
  * the safe buffer.  (basically return things back to the way they
  * should be)
  */
-void sa1111_unmap_single(struct device *dev, dma_addr_t dma_addr,
-			 size_t size, enum dma_data_direction dir)
+void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		 enum dma_data_direction dir)
 {
 	unsigned long flags;

-	dev_dbg(dev, "%s(ptr=%08lx,size=%d,dir=%x)\n",
-		__func__, dma_addr, size, dir);
+	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
+		__func__, (void *) dma_addr, size, dir);
+
+	BUG_ON(dir == DMA_NONE);

 	local_irq_save(flags);
@@ -409,8 +424,9 @@ void sa1111_unmap_single(struct device *dev, dma_addr_t dma_addr,
 	local_irq_restore(flags);
 }
-int sa1111_map_sg(struct device *dev, struct scatterlist *sg,
-		  int nents, enum dma_data_direction dir)
+int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	   enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i;
@@ -428,7 +444,8 @@ int sa1111_map_sg(struct device *dev, struct scatterlist *sg,
 		unsigned int length = sg->length;
 		void *ptr = page_address(page) + offset;

-		sg->dma_address = map_single(dev, ptr, length, dir);
+		sg->dma_address =
+			map_single(dev, ptr, length, dir);
 	}

 	local_irq_restore(flags);
@@ -436,8 +453,9 @@ int sa1111_map_sg(struct device *dev, struct scatterlist *sg,
 	return nents;
 }

-void sa1111_unmap_sg(struct device *dev, struct scatterlist *sg,
-		     int nents, enum dma_data_direction dir)
+void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+	     enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i;
@@ -445,6 +463,8 @@ void sa1111_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
 		__func__, sg, nents, dir);

+	BUG_ON(dir == DMA_NONE);
+
 	local_irq_save(flags);

 	for (i = 0; i < nents; i++, sg++) {
@@ -457,13 +477,14 @@ void sa1111_unmap_sg(struct device *dev, struct scatterlist *sg,
 	local_irq_restore(flags);
 }
-void sa1111_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
-				    size_t size, enum dma_data_direction dir)
+void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
+			enum dma_data_direction dir)
 {
 	unsigned long flags;

-	dev_dbg(dev, "%s(ptr=%08lx,size=%d,dir=%x)\n",
-		__func__, dma_addr, size, dir);
+	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
+		__func__, (void *) dma_addr, size, dir);

 	local_irq_save(flags);
@@ -472,13 +493,14 @@ void sa1111_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 	local_irq_restore(flags);
 }

-void sa1111_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
-				       size_t size, enum dma_data_direction dir)
+void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
+			   enum dma_data_direction dir)
 {
 	unsigned long flags;

-	dev_dbg(dev, "%s(ptr=%08lx,size=%d,dir=%x)\n",
-		__func__, dma_addr, size, dir);
+	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
+		__func__, (void *) dma_addr, size, dir);

 	local_irq_save(flags);
@@ -487,8 +509,9 @@ void sa1111_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 	local_irq_restore(flags);
 }

-void sa1111_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction dir)
+void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+		    enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i;
@@ -496,6 +519,8 @@ void sa1111_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
 		__func__, sg, nents, dir);

+	BUG_ON(dir == DMA_NONE);
+
 	local_irq_save(flags);

 	for (i = 0; i < nents; i++, sg++) {
@@ -508,8 +533,9 @@ void sa1111_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	local_irq_restore(flags);
 }

-void sa1111_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-				   int nents, enum dma_data_direction dir)
+void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
+		       enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i;
@@ -517,6 +543,8 @@ void sa1111_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
 		__func__, sg, nents, dir);

+	BUG_ON(dir == DMA_NONE);
+
 	local_irq_save(flags);

 	for (i = 0; i < nents; i++, sg++) {
@@ -529,38 +557,119 @@ void sa1111_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL(sa1111_map_single);
-EXPORT_SYMBOL(sa1111_unmap_single);
-EXPORT_SYMBOL(sa1111_map_sg);
-EXPORT_SYMBOL(sa1111_unmap_sg);
-EXPORT_SYMBOL(sa1111_dma_sync_single_for_cpu);
-EXPORT_SYMBOL(sa1111_dma_sync_single_for_device);
-EXPORT_SYMBOL(sa1111_dma_sync_sg_for_cpu);
-EXPORT_SYMBOL(sa1111_dma_sync_sg_for_device);
-
-/* **************************************** */
-
-static int __init sa1111_dmabuf_init(void)
-{
-	printk(KERN_DEBUG "sa1111_dmabuf: initializing SA-1111 DMA buffers\n");
-
-	return create_safe_buffer_pools();
-}
-module_init(sa1111_dmabuf_init);
-
-static void __exit sa1111_dmabuf_exit(void)
-{
-	BUG_ON(!list_empty(&safe_buffers));
-
-#ifdef STATS
-	print_alloc_stats();
-	print_map_stats();
-#endif
-
-	destroy_safe_buffer_pools();
-}
-module_exit(sa1111_dmabuf_exit);
-
-MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>");
-MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for SA-1111.");
+int
+dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
+		       unsigned long large_buffer_size)
+{
+	struct dmabounce_device_info *device_info;
+
+	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
+	if (!device_info) {
+		printk(KERN_ERR
+		       "Could not allocate dmabounce_device_info for %s",
+		       dev->bus_id);
+		return -ENOMEM;
+	}
+
+	device_info->small_buffer_pool =
+		dma_pool_create("small_dmabounce_pool",
+				dev,
+				small_buffer_size,
+				0 /* byte alignment */,
+				0 /* no page-crossing issues */);
+	if (!device_info->small_buffer_pool) {
+		printk(KERN_ERR
+		       "dmabounce: could not allocate small DMA pool for %s\n",
+		       dev->bus_id);
+		kfree(device_info);
+		return -ENOMEM;
+	}
+
+	if (large_buffer_size) {
+		device_info->large_buffer_pool =
+			dma_pool_create("large_dmabounce_pool",
+					dev,
+					large_buffer_size,
+					0 /* byte alignment */,
+					0 /* no page-crossing issues */);
+		if (!device_info->large_buffer_pool) {
+			printk(KERN_ERR
+			       "dmabounce: could not allocate large DMA pool for %s\n",
+			       dev->bus_id);
+			dma_pool_destroy(device_info->small_buffer_pool);
+			kfree(device_info);
+			return -ENOMEM;
+		}
+	}
+
+	device_info->dev = dev;
+	device_info->small_buffer_size = small_buffer_size;
+	device_info->large_buffer_size = large_buffer_size;
+	INIT_LIST_HEAD(&device_info->safe_buffers);
+
+#ifdef STATS
+	device_info->sbp_allocs = 0;
+	device_info->lbp_allocs = 0;
+	device_info->total_allocs = 0;
+	device_info->map_op_count = 0;
+	device_info->bounce_count = 0;
+#endif
+
+	list_add(&device_info->node, &dmabounce_devs);
+
+	printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
+	       dev->bus_id, dev->bus->name);
+
+	return 0;
+}
+
+void
+dmabounce_unregister_dev(struct device *dev)
+{
+	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+
+	if (!device_info) {
+		printk(KERN_WARNING
+		       "%s: never registered with dmabounce but attempting "
+		       "to unregister!\n", dev->bus_id);
+		return;
+	}
+
+	if (!list_empty(&device_info->safe_buffers)) {
+		printk(KERN_ERR
+		       "%s: removing from dmabounce with pending buffers!\n",
+		       dev->bus_id);
+		BUG();
+	}
+
+	if (device_info->small_buffer_pool)
+		dma_pool_destroy(device_info->small_buffer_pool);
+	if (device_info->large_buffer_pool)
+		dma_pool_destroy(device_info->large_buffer_pool);
+
+#ifdef STATS
+	print_alloc_stats(device_info);
+	print_map_stats(device_info);
+#endif
+
+	list_del(&device_info->node);
+
+	kfree(device_info);
+
+	printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
+	       dev->bus_id, dev->bus->name);
+}
+
+EXPORT_SYMBOL(dma_map_single);
+EXPORT_SYMBOL(dma_unmap_single);
+EXPORT_SYMBOL(dma_map_sg);
+EXPORT_SYMBOL(dma_unmap_sg);
+EXPORT_SYMBOL(dma_sync_single);
+EXPORT_SYMBOL(dma_sync_sg);
+EXPORT_SYMBOL(dmabounce_register_dev);
+EXPORT_SYMBOL(dmabounce_unregister_dev);
+
+MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
+MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
 MODULE_LICENSE("GPL");
@@ -25,6 +25,7 @@
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/dma-mapping.h>

 #include <asm/hardware.h>
 #include <asm/mach-types.h>
@@ -547,15 +548,6 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
 	snprintf(dev->dev.bus_id, sizeof(dev->dev.bus_id),
 		 "%4.4lx", info->offset);

-	/*
-	 * If the parent device has a DMA mask associated with it,
-	 * propagate it down to the children.
-	 */
-	if (sachip->dev->dma_mask) {
-		dev->dma_mask = *sachip->dev->dma_mask;
-		dev->dev.dma_mask = &dev->dma_mask;
-	}
-
 	dev->devid	= info->devid;
 	dev->dev.parent	= sachip->dev;
 	dev->dev.bus	= &sa1111_bus_type;
@@ -573,15 +565,37 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
 	if (ret) {
 		printk("SA1111: failed to allocate resource for %s\n",
 			dev->res.name);
+		kfree(dev);
 		goto out;
 	}

 	ret = device_register(&dev->dev);
 	if (ret) {
 		release_resource(&dev->res);
-out:
 		kfree(dev);
+		goto out;
+	}
+
+	/*
+	 * If the parent device has a DMA mask associated with it,
+	 * propagate it down to the children.
+	 */
+	if (sachip->dev->dma_mask) {
+		dev->dma_mask = *sachip->dev->dma_mask;
+		dev->dev.dma_mask = &dev->dma_mask;
+
+		if (dev->dma_mask != 0xffffffffUL) {
+			ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
+			if (ret) {
+				printk("SA1111: Failed to register %s with dmabounce\n",
+				       dev->dev.bus_id);
+				device_unregister(&dev->dev);
+			}
+		}
 	}

+out:
 	return ret;
 }
@@ -742,61 +756,31 @@ static void __sa1111_remove(struct sa1111 *sachip)
  *
  * This routine only identifies whether or not a given DMA address
  * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
  */
-int sa1111_check_dma_bug(dma_addr_t addr)
+int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 {
-	struct sa1111 *sachip = g_sa1111;
 	unsigned int physaddr = SA1111_DMA_ADDR((unsigned int)addr);
-	unsigned int smcr;
+	u32 dma_mask = *dev->dma_mask;

-	/* Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+	/*
+	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
 	 * User's Guide" mentions that jumpers R51 and R52 control the
 	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
 	 * SDRAM bank 1 on Neponset).  The default configuration selects
 	 * Assabet, so any address in bank 1 is necessarily invalid.
 	 */
-	if ((machine_is_assabet() || machine_is_pfs168()) && addr >= 0xc8000000)
-		return -1;
+	if ((machine_is_assabet() || machine_is_pfs168()) &&
+	    (addr >= 0xc8000000 || (addr + size) >= 0xc8000000))
+		return 1;

-	/* The bug only applies to buffers located more than one megabyte
-	 * above the start of the target bank:
+	/*
+	 * Check to see if either the start or end are illegal.
 	 */
-	if (physaddr<(1<<20))
-		return 0;
-
-	smcr = sa1111_readl(sachip->base + SA1111_SMCR);
-	switch (FExtr(smcr, SMCR_DRAC)) {
-	case 01: /* 10 row + bank address bits, A<20> must not be set */
-		if (physaddr & (1<<20))
-			return -1;
-		break;
-	case 02: /* 11 row + bank address bits, A<23> must not be set */
-		if (physaddr & (1<<23))
-			return -1;
-		break;
-	case 03: /* 12 row + bank address bits, A<24> must not be set */
-		if (physaddr & (1<<24))
-			return -1;
-		break;
-	case 04: /* 13 row + bank address bits, A<25> must not be set */
-		if (physaddr & (1<<25))
-			return -1;
-		break;
-	case 05: /* 14 row + bank address bits, A<20> must not be set */
-		if (physaddr & (1<<20))
-			return -1;
-		break;
-	case 06: /* 15 row + bank address bits, A<20> must not be set */
-		if (physaddr & (1<<20))
-			return -1;
-		break;
-	default:
-		printk(KERN_ERR "%s(): invalid SMCR DRAC value 0%lo\n",
-		       __FUNCTION__, FExtr(smcr, SMCR_DRAC));
-		return -1;
-	}
-
-	return 0;
+	return (addr & ~dma_mask) ||
+		((addr + size - 1) & ~dma_mask);
 }
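A worked example of the final mask check, with illustrative numbers (assuming a device whose dma_mask covers the low 64MB, i.e. 0x03ffffff):

	u32 dma_mask = 0x03ffffff;	/* device reaches the low 64MB   */
	dma_addr_t addr = 0x03fff000;	/* start lies inside the window  */
	size_t size = 0x2000;		/* ...but the end spills past it */

	/* start: 0x03fff000 & ~dma_mask == 0          -> legal   */
	/* end:   0x04000fff & ~dma_mask == 0x04000000 -> illegal */
	/* so dma_needs_bounce() returns non-zero and map_single() bounces */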
 struct sa1111_save_data {
@@ -1293,7 +1277,6 @@ module_exit(sa1111_exit);
 MODULE_DESCRIPTION("Intel Corporation SA1111 core driver");
 MODULE_LICENSE("GPL");

-EXPORT_SYMBOL(sa1111_check_dma_bug);
 EXPORT_SYMBOL(sa1111_select_audio_mode);
 EXPORT_SYMBOL(sa1111_set_audio_rate);
 EXPORT_SYMBOL(sa1111_get_audio_rate);
...
@@ -965,7 +965,7 @@ static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector,
 	struct cdrom_info *info = drive->driver_data;

 	/* Number of sectors to read into the buffer. */
-	int sectors_to_buffer = MIN (sectors_to_transfer,
+	int sectors_to_buffer = min_t(int, sectors_to_transfer,
 				     (SECTOR_BUFFER_SIZE >> SECTOR_BITS) -
 				     info->nsectors_buffered);

@@ -1114,7 +1114,7 @@ static ide_startstop_t cdrom_read_intr (ide_drive_t *drive)
 		/* First, figure out if we need to bit-bucket
 		   any of the leading sectors. */
-		nskip = MIN((int)(rq->current_nr_sectors - bio_cur_sectors(rq->bio)), sectors_to_transfer);
+		nskip = min_t(int, rq->current_nr_sectors - bio_cur_sectors(rq->bio), sectors_to_transfer);

 		while (nskip > 0) {
 			/* We need to throw away a sector. */
@@ -1144,7 +1144,7 @@ static ide_startstop_t cdrom_read_intr (ide_drive_t *drive)
 		/* Transfer data to the buffers.
 		   Figure out how many sectors we can transfer
 		   to the current buffer. */
-		this_transfer = MIN (sectors_to_transfer,
+		this_transfer = min_t(int, sectors_to_transfer,
 				     rq->current_nr_sectors);

 		/* Read this_transfer sectors
@@ -1860,7 +1860,7 @@ static ide_startstop_t cdrom_write_intr(ide_drive_t *drive)
 		/*
 		 * Figure out how many sectors we can transfer
 		 */
-		this_transfer = MIN(sectors_to_transfer,rq->current_nr_sectors);
+		this_transfer = min_t(int, sectors_to_transfer, rq->current_nr_sectors);

 		while (this_transfer > 0) {
 			HWIF(drive)->atapi_output_bytes(drive, rq->buffer, SECTOR_SIZE);
...
@@ -54,8 +54,6 @@
 #define BLOCKS_PER_FRAME	(CD_FRAMESIZE / BLOCK_SIZE)

-#define MIN(a,b) ((a) < (b) ? (a) : (b))
-
 /* special command codes for strategy routine. */
 #define PACKET_COMMAND        4315
 #define REQUEST_SENSE_COMMAND 4316
...
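The MIN()/IDE_MIN() to min()/min_t() conversions in these IDE hunks are not pure renames: the open-coded ternary evaluates its arguments twice and silently mixes signedness, whereas min() warns on mismatched types and min_t() makes the comparison type explicit. A small illustration (values hypothetical):

	int avail = -1;			/* e.g. an error/sentinel value */
	unsigned int want = 16;

	/* MIN(avail, want): avail promotes to 0xffffffff, so 16 "wins" */
	/* min(avail, want): triggers a compile-time type warning       */
	/* min_t(int, avail, want): compares as int and yields -1       */
	int n = min_t(int, avail, want);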
@@ -741,7 +741,7 @@ static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
 			    && 0 < (signed long)(WAKEUP(drive) - (jiffies - best->service_time))
 			    && 0 < (signed long)((jiffies + t) - WAKEUP(drive)))
 			{
-				ide_stall_queue(best, IDE_MIN(t, 10 * WAIT_MIN_SLEEP));
+				ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
 				goto repeat;
 			}
 		} while ((drive = drive->next) != best);
...
@@ -508,8 +508,8 @@ int proc_ide_write_settings
 		}
 		if (*p != ':')
 			goto parse_error;
-		len = IDE_MIN(p - start, MAX_LEN);
-		strncpy(name, start, IDE_MIN(len, MAX_LEN));
+		len = min(p - start, MAX_LEN);
+		strncpy(name, start, min(len, MAX_LEN));
 		name[len] = 0;

 		if (n > 0) {
...
@@ -4698,7 +4698,7 @@ static void idetape_setup (ide_drive_t *drive, idetape_tape_t *tape, int minor)
 	 *	Ensure that the number we got makes sense; limit
 	 *	it within IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
 	 */
-	tape->best_dsc_rw_frequency = max((unsigned long) min(t, (unsigned long) IDETAPE_DSC_RW_MAX), (unsigned long) IDETAPE_DSC_RW_MIN);
+	tape->best_dsc_rw_frequency = max_t(unsigned long, min_t(unsigned long, t, IDETAPE_DSC_RW_MAX), IDETAPE_DSC_RW_MIN);
 	printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
 		"%dkB pipeline, %lums tDSC%s\n",
 		drive->name, tape->name, tape->capabilities.speed,
...
@@ -243,25 +243,7 @@ int __init ali14xx_init(void)
 }

 #ifdef MODULE
-static void __exit ali14xx_release_hwif(ide_hwif_t *hwif)
-{
-	if (hwif->chipset != ide_ali14xx)
-		return;
-
-	hwif->chipset = ide_unknown;
-	hwif->tuneproc = NULL;
-	hwif->mate = NULL;
-	hwif->channel = 0;
-}
-
-static void __exit ali14xx_exit(void)
-{
-	ali14xx_release_hwif(&ide_hwifs[0]);
-	ali14xx_release_hwif(&ide_hwifs[1]);
-}
-
 module_init(ali14xx_init);
-module_exit(ali14xx_exit);
 #endif

 MODULE_AUTHOR("see local file");
...
@@ -155,27 +155,7 @@ int __init dtc2278_init(void)
 }

 #ifdef MODULE
-static void __exit dtc2278_release_hwif(ide_hwif_t *hwif)
-{
-	if (hwif->chipset != ide_dtc2278)
-		return;
-
-	hwif->serialized = 0;
-	hwif->chipset = ide_unknown;
-	hwif->tuneproc = NULL;
-	hwif->drives[0].no_unmask = 0;
-	hwif->drives[1].no_unmask = 0;
-	hwif->mate = NULL;
-}
-
-static void __exit dtc2278_exit(void)
-{
-	dtc2278_release_hwif(&ide_hwifs[0]);
-	dtc2278_release_hwif(&ide_hwifs[1]);
-}
-
 module_init(dtc2278_init);
-module_exit(dtc2278_exit);
 #endif

 MODULE_AUTHOR("See Local File");
...
@@ -360,31 +360,7 @@ int __init ht6560b_init(void)
 }

 #ifdef MODULE
-static void __exit ht6560b_release_hwif(ide_hwif_t *hwif)
-{
-	if (hwif->chipset != ide_ht6560b)
-		return;
-
-	hwif->chipset = ide_unknown;
-	hwif->tuneproc = NULL;
-	hwif->selectproc = NULL;
-	hwif->serialized = 0;
-	hwif->mate = NULL;
-	hwif->channel = 0;
-
-	hwif->drives[0].drive_data = 0;
-	hwif->drives[1].drive_data = 0;
-}
-
-static void __exit ht6560b_exit(void)
-{
-	ht6560b_release_hwif(&ide_hwifs[0]);
-	ht6560b_release_hwif(&ide_hwifs[1]);
-	release_region(HT_CONFIG_PORT, 1);
-}
-
 module_init(ht6560b_init);
-module_exit(ht6560b_exit);
 #endif

 MODULE_AUTHOR("See Local File");
...
@@ -311,29 +311,7 @@ int __init pdc4030_init(void)
 }

 #ifdef MODULE
-static void __exit pdc4030_release_hwif(ide_hwif_t *hwif)
-{
-	hwif->chipset = ide_unknown;
-	hwif->selectproc = NULL;
-	hwif->serialized = 0;
-	hwif->drives[0].io_32bit = 0;
-	hwif->drives[1].io_32bit = 0;
-	hwif->drives[0].keep_settings = 0;
-	hwif->drives[1].keep_settings = 0;
-	hwif->drives[0].noprobe = 0;
-	hwif->drives[1].noprobe = 0;
-}
-
-static void __exit pdc4030_exit(void)
-{
-	unsigned int index;
-
-	for (index = 0; index < MAX_HWIFS; index++)
-		pdc4030_release_hwif(&ide_hwifs[index]);
-}
-
 module_init(pdc4030_init);
-module_exit(pdc4030_exit);
 #endif

 MODULE_AUTHOR("Peter Denison");
...
@@ -262,7 +262,7 @@ static void qd6580_tune_drive (ide_drive_t *drive, u8 pio)
 	if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) {
 		pio = ide_get_best_pio_mode(drive, pio, 255, &d);
-		pio = IDE_MIN(pio,4);
+		pio = min_t(u8, pio, 4);

 		switch (pio) {
 			case 0: break;
@@ -354,12 +354,12 @@ static void __init qd_setup(ide_hwif_t *hwif, int base, int config,
 	probe_hwif_init(hwif);
 }

-#ifdef MODULE
 /*
  * qd_unsetup:
  *
  * called to unsetup an ata channel : back to default values, unlinks tuning
  */
+/*
 static void __exit qd_unsetup(ide_hwif_t *hwif)
 {
 	u8 config = hwif->config_data;
@@ -389,7 +389,7 @@ static void __exit qd_unsetup(ide_hwif_t *hwif)
 		printk(KERN_WARNING "keeping settings !\n");
 	}
 }
-#endif
+*/

 /*
  * qd_probe:
@@ -496,14 +496,7 @@ int __init qd65xx_init(void)
 }

 #ifdef MODULE
-static void __exit qd65xx_exit(void)
-{
-	qd_unsetup(&ide_hwifs[0]);
-	qd_unsetup(&ide_hwifs[1]);
-}
-
 module_init(qd65xx_init);
-module_exit(qd65xx_exit);
 #endif

 MODULE_AUTHOR("Samuel Thibault");
...
@@ -173,33 +173,7 @@ int __init umc8672_init(void)
 }

 #ifdef MODULE
-static void __exit umc8672_release_hwif(ide_hwif_t *hwif)
-{
-	if (hwif->chipset != ide_umc8672)
-		return;
-
-	hwif->chipset = ide_unknown;
-	hwif->tuneproc = NULL;
-	hwif->mate = NULL;
-	hwif->channel = 0;
-}
-
-static void __exit umc8672_exit(void)
-{
-	unsigned long flags;
-
-	umc8672_release_hwif(&ide_hwifs[0]);
-	umc8672_release_hwif(&ide_hwifs[1]);
-
-	local_irq_save(flags);
-	outb_p(0xa5, 0x108);	/* disable umc */
-	local_irq_restore(flags);
-
-	release_region(0x108, 2);
-}
-
 module_init(umc8672_init);
-module_exit(umc8672_exit);
 #endif

 MODULE_AUTHOR("Wolfram Podien");
...
@@ -200,7 +200,7 @@ static void program_drive_counts (ide_drive_t *drive, int setup_count, int activ
 	 */
 	if (channel) {
 		drive->drive_data = setup_count;
-		setup_count = IDE_MAX(drives[0].drive_data,
+		setup_count = max(drives[0].drive_data,
 					drives[1].drive_data);
 		cmdprintk("Secondary interface, setup_count = %d\n",
 			setup_count);
...
@@ -132,6 +132,9 @@ static struct pci_device_id generic_pci_tbl[] = {
 	{ PCI_VENDOR_ID_VIA,    PCI_DEVICE_ID_VIA_82C561,   PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7},
 	{ PCI_VENDOR_ID_OPTI,   PCI_DEVICE_ID_OPTI_82C558,  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8},
 	{ PCI_VENDOR_ID_VIA,    PCI_DEVICE_ID_VIA_8237_SATA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9},
+	{ PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10},
+	{ PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11},
+	{ PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12},
 	{ 0, },
 };
 MODULE_DEVICE_TABLE(pci, generic_pci_tbl);
...
@@ -129,6 +129,33 @@ static ide_pci_device_t generic_chipsets[] __devinitdata = {
 		.enablebits	= {{0x00,0x00,0x00}, {0x00,0x00,0x00}},
 		.bootable	= OFF_BOARD,
 		.extra		= 0,
+	},{	/* 10 */
+		.vendor		= PCI_VENDOR_ID_TOSHIBA,
+		.device		= PCI_DEVICE_ID_TOSHIBA_PICCOLO,
+		.name		= "Piccolo0102",
+		.init_chipset	= init_chipset_generic,
+		.init_hwif	= init_hwif_generic,
+		.channels	= 2,
+		.autodma	= NOAUTODMA,
+		.bootable	= ON_BOARD,
+	},{	/* 11 */
+		.vendor		= PCI_VENDOR_ID_TOSHIBA,
+		.device		= PCI_DEVICE_ID_TOSHIBA_PICCOLO_1,
+		.name		= "Piccolo0103",
+		.init_chipset	= init_chipset_generic,
+		.init_hwif	= init_hwif_generic,
+		.channels	= 2,
+		.autodma	= NOAUTODMA,
+		.bootable	= ON_BOARD,
+	},{	/* 12 */
+		.vendor		= PCI_VENDOR_ID_TOSHIBA,
+		.device		= PCI_DEVICE_ID_TOSHIBA_PICCOLO_2,
+		.name		= "Piccolo0105",
+		.init_chipset	= init_chipset_generic,
+		.init_hwif	= init_hwif_generic,
+		.channels	= 2,
+		.autodma	= NOAUTODMA,
+		.bootable	= ON_BOARD,
 	},{
 		.vendor		= 0,
 		.device		= 0,
...
@@ -375,7 +375,7 @@ static void via82cxxx_tune_drive(ide_drive_t *drive, u8 pio)
 		return;
 	}

-	via_set_drive(drive, XFER_PIO_0 + MIN(pio, 5));
+	via_set_drive(drive, XFER_PIO_0 + min_t(u8, pio, 5));
 }

 /**
...
@@ -146,7 +146,7 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
 			idescsi_discard_data (drive, bcount);
 			return;
 		}
-		count = IDE_MIN (pc->sg->length - pc->b_count, bcount);
+		count = min(pc->sg->length - pc->b_count, bcount);
 		buf = page_address(pc->sg->page) + pc->sg->offset;
 		atapi_input_bytes (drive, buf + pc->b_count, count);
 		bcount -= count; pc->b_count += count;
@@ -168,7 +168,7 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
 			idescsi_output_zeros (drive, bcount);
 			return;
 		}
-		count = IDE_MIN (pc->sg->length - pc->b_count, bcount);
+		count = min(pc->sg->length - pc->b_count, bcount);
 		buf = page_address(pc->sg->page) + pc->sg->offset;
 		atapi_output_bytes (drive, buf + pc->b_count, count);
 		bcount -= count; pc->b_count += count;
@@ -396,7 +396,7 @@ static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)
 			if (!test_bit(PC_WRITING, &pc->flags) && pc->actually_transferred && pc->actually_transferred <= 1024 && pc->buffer) {
 				printk(", rst = ");
 				scsi_buf = pc->scsi_cmd->request_buffer;
-				hexdump(scsi_buf, IDE_MIN(16, pc->scsi_cmd->request_bufflen));
+				hexdump(scsi_buf, min_t(unsigned, 16, pc->scsi_cmd->request_bufflen));
 			} else printk("\n");
 		}
 	}
@@ -413,7 +413,7 @@ static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)

 static inline unsigned long get_timeout(idescsi_pc_t *pc)
 {
-	return IDE_MAX(WAIT_CMD, pc->timeout - jiffies);
+	return max_t(unsigned long, WAIT_CMD, pc->timeout - jiffies);
 }

 static int idescsi_expiry(ide_drive_t *drive)
@@ -580,7 +580,7 @@ static ide_startstop_t idescsi_issue_pc (ide_drive_t *drive, idescsi_pc_t *pc)
 	scsi->pc=pc;							/* Set the current packet command */
 	pc->actually_transferred=0;					/* We haven't transferred any data yet */
 	pc->current_position=pc->buffer;
-	bcount.all = IDE_MIN(pc->request_transfer, 63 * 1024);		/* Request to transfer the entire buffer at once */
+	bcount.all = min(pc->request_transfer, 63 * 1024);		/* Request to transfer the entire buffer at once */

 	feature.all = 0;
 	if (drive->using_dma && rq->bio) {
...
@@ -44,7 +44,7 @@ static inline void atomic_set(atomic_t *v, int i)
 	: "cc");
 }

-static inline void atomic_add(int i, volatile atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long tmp, tmp2;

@@ -59,7 +59,7 @@ static inline void atomic_add(int i, volatile atomic_t *v)
 	: "cc");
 }

-static inline void atomic_sub(int i, volatile atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
 	unsigned long tmp, tmp2;

@@ -77,7 +77,7 @@ static inline void atomic_sub(int i, volatile atomic_t *v)
 #define atomic_inc(v)		atomic_add(1, v)
 #define atomic_dec(v)		atomic_sub(1, v)

-static inline int atomic_dec_and_test(volatile atomic_t *v)
+static inline int atomic_dec_and_test(atomic_t *v)
 {
 	unsigned long tmp;
 	int result;

@@ -95,7 +95,7 @@ static inline int atomic_dec_and_test(volatile atomic_t *v)
 	return result == 0;
 }

-static inline int atomic_add_negative(int i, volatile atomic_t *v)
+static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	unsigned long tmp;
 	int result;

@@ -138,7 +138,7 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)

 #define atomic_set(v,i)	(((v)->counter) = (i))

-static inline void atomic_add(int i, volatile atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long flags;

@@ -147,7 +147,7 @@ static inline void atomic_add(int i, volatile atomic_t *v)
 	local_irq_restore(flags);
 }

-static inline void atomic_sub(int i, volatile atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
 	unsigned long flags;

@@ -156,7 +156,7 @@ static inline void atomic_sub(int i, volatile atomic_t *v)
 	local_irq_restore(flags);
 }

-static inline void atomic_inc(volatile atomic_t *v)
+static inline void atomic_inc(atomic_t *v)
 {
 	unsigned long flags;

@@ -165,7 +165,7 @@ static inline void atomic_inc(volatile atomic_t *v)
 	local_irq_restore(flags);
 }

-static inline void atomic_dec(volatile atomic_t *v)
+static inline void atomic_dec(atomic_t *v)
 {
 	unsigned long flags;

@@ -174,7 +174,7 @@ static inline void atomic_dec(volatile atomic_t *v)
 	local_irq_restore(flags);
 }

-static inline int atomic_dec_and_test(volatile atomic_t *v)
+static inline int atomic_dec_and_test(atomic_t *v)
 {
 	unsigned long flags;
 	int val;

@@ -187,7 +187,7 @@ static inline int atomic_dec_and_test(volatile atomic_t *v)
 	return val == 0;
 }

-static inline int atomic_add_negative(int i, volatile atomic_t *v)
+static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	unsigned long flags;
 	int val;
...
 #ifndef __ASM_ARM_DIV64
 #define __ASM_ARM_DIV64

+#include <asm/system.h>
+
 /*
  * The semantics of do_div() are:
  *
@@ -31,7 +33,11 @@
 	register unsigned long long __n   asm("r0") = n;	\
 	register unsigned long long __res asm("r2");		\
 	register unsigned int __rem       asm(__xh);		\
-	asm(	"bl	__do_div64"				\
+	asm(	__asmeq("%0", __xh)				\
+		__asmeq("%1", "r2")				\
+		__asmeq("%2", "r0")				\
+		__asmeq("%3", "r4")				\
+		"bl	__do_div64"				\
 		: "=r" (__rem), "=r" (__res)			\
 		: "r" (__n), "r" (__base)			\
 		: "ip", "lr", "cc");				\
...
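As a usage reminder of the calling convention spelled out in the comment above this macro, do_div() divides a 64-bit value in place and returns the 32-bit remainder:

	u64 ns = 1000000123ULL;
	u32 rem;

	rem = do_div(ns, 1000000);	/* now ns == 1000, rem == 123 */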
...@@ -16,29 +16,6 @@ ...@@ -16,29 +16,6 @@
*/ */
extern void consistent_sync(void *kaddr, size_t size, int rw); extern void consistent_sync(void *kaddr, size_t size, int rw);
/*
* For SA-1111 these functions are "magic" and utilize bounce
* bufferes as needed to work around SA-1111 DMA bugs.
*/
dma_addr_t sa1111_map_single(struct device *dev, void *, size_t, enum dma_data_direction);
void sa1111_unmap_single(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
int sa1111_map_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
void sa1111_unmap_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
void sa1111_dma_sync_single_for_cpu(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
void sa1111_dma_sync_single_for_device(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
void sa1111_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
void sa1111_dma_sync_sg_for_device(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
#ifdef CONFIG_SA1111
extern struct bus_type sa1111_bus_type;
#define dmadev_is_sa1111(dev) ((dev)->bus == &sa1111_bus_type)
#else
#define dmadev_is_sa1111(dev) (0)
#endif
/* /*
* Return whether the given device DMA address mask can be supported * Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits * properly. For example, if your device can only drive the low 24-bits
...@@ -70,6 +47,14 @@ static inline int dma_is_consistent(dma_addr_t handle) ...@@ -70,6 +47,14 @@ static inline int dma_is_consistent(dma_addr_t handle)
return 0; return 0;
} }
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
return dma_addr == ~0;
}
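With bounce buffering in the picture, a streaming mapping can now fail at run time, so callers are expected to test the returned handle against this sentinel. A minimal sketch (my_map_or_fail is a hypothetical helper; the direction is chosen arbitrarily):

static int my_map_or_fail(struct device *dev, void *buf, size_t len,
			  dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(*handle))
		return -ENOMEM;	/* e.g. no safe bounce buffer available */
	return 0;
}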
/** /**
* dma_alloc_coherent - allocate consistent memory for DMA * dma_alloc_coherent - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
...@@ -118,6 +103,7 @@ dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int ...@@ -118,6 +103,7 @@ dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int
#define dma_free_writecombine(dev,size,cpu_addr,handle) \ #define dma_free_writecombine(dev,size,cpu_addr,handle) \
dma_free_coherent(dev,size,cpu_addr,handle) dma_free_coherent(dev,size,cpu_addr,handle)
/** /**
* dma_map_single - map a single buffer for streaming DMA * dma_map_single - map a single buffer for streaming DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
...@@ -132,16 +118,17 @@ dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int ...@@ -132,16 +118,17 @@ dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int
* can regain ownership by calling dma_unmap_single() or * can regain ownership by calling dma_unmap_single() or
* dma_sync_single_for_cpu(). * dma_sync_single_for_cpu().
*/ */
#ifndef CONFIG_DMABOUNCE
static inline dma_addr_t static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size, dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
if (dmadev_is_sa1111(dev))
return sa1111_map_single(dev, cpu_addr, size, dir);
consistent_sync(cpu_addr, size, dir); consistent_sync(cpu_addr, size, dir);
return __virt_to_bus((unsigned long)cpu_addr); return __virt_to_bus((unsigned long)cpu_addr);
} }
#else
extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
#endif
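The ownership rule in one hedged sketch (my_dev_start_tx and the hardware-programming step are hypothetical): between map and unmap the buffer belongs to the device and the CPU must not touch it.

#include <linux/dma-mapping.h>

static int my_dev_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(handle))
		return -ENOMEM;

	/*
	 * Program the controller with `handle` and start the transfer.
	 * When it completes, ownership returns to the CPU via:
	 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	 */
	return 0;
}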
/** /**
* dma_map_page - map a portion of a page for streaming DMA * dma_map_page - map a portion of a page for streaming DMA
...@@ -180,15 +167,16 @@ dma_map_page(struct device *dev, struct page *page, ...@@ -180,15 +167,16 @@ dma_map_page(struct device *dev, struct page *page,
* After this call, reads by the CPU to the buffer are guaranteed to see * After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there. * whatever the device wrote there.
*/ */
#ifndef CONFIG_DMABOUNCE
static inline void static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
if (dmadev_is_sa1111(dev))
sa1111_unmap_single(dev, handle, size, dir);
/* nothing to do */ /* nothing to do */
} }
#else
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif
/** /**
* dma_unmap_page - unmap a buffer previously mapped through dma_map_page() * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
...@@ -233,15 +221,13 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, ...@@ -233,15 +221,13 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
* Device ownership issues as mentioned above for dma_map_single are * Device ownership issues as mentioned above for dma_map_single are
* the same here. * the same here.
*/ */
#ifndef CONFIG_DMABOUNCE
static inline int static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
int i; int i;
if (dmadev_is_sa1111(dev))
return sa1111_map_sg(dev, sg, nents, dir);
for (i = 0; i < nents; i++, sg++) { for (i = 0; i < nents; i++, sg++) {
char *virt; char *virt;
...@@ -252,6 +238,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, ...@@ -252,6 +238,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
return nents; return nents;
} }
#else
extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif
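A hedged sketch of the scatter-gather path; the two-entry list and the descriptor-programming step are illustrative. In this era the scatterlist fields are filled in by hand, and the addresses handed to the hardware must come from sg_dma_address()/sg_dma_len(), since dmabounce may have substituted a safe buffer behind the caller's back:

#include <linux/mm.h>
#include <linux/dma-mapping.h>

static void my_dev_map_two(struct device *dev, void *buf0, size_t len0,
			   void *buf1, size_t len1)
{
	struct scatterlist sg[2];
	int i, nents;

	sg[0].page   = virt_to_page(buf0);
	sg[0].offset = offset_in_page(buf0);
	sg[0].length = len0;
	sg[1].page   = virt_to_page(buf1);
	sg[1].offset = offset_in_page(buf1);
	sg[1].length = len1;

	nents = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);

	for (i = 0; i < nents; i++) {
		/* hand sg_dma_address(&sg[i]) / sg_dma_len(&sg[i]) to the
		 * hardware descriptor here */
	}

	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
}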
/** /**
* dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
...@@ -264,17 +253,18 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, ...@@ -264,17 +253,18 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
* Again, CPU read rules concerning calls here are the same as for * Again, CPU read rules concerning calls here are the same as for
* dma_unmap_single() above. * dma_unmap_single() above.
*/ */
#ifndef CONFIG_DMABOUNCE
static inline void static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
if (dmadev_is_sa1111(dev)) {
sa1111_unmap_sg(dev, sg, nents, dir);
return;
}
/* nothing to do */ /* nothing to do */
} }
#else
extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif
/** /**
* dma_sync_single_for_cpu * dma_sync_single_for_cpu
...@@ -293,15 +283,11 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, ...@@ -293,15 +283,11 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
* must first perform a dma_sync_for_device, and then the * must first perform a dma_sync_for_device, and then the
* device again owns the buffer. * device again owns the buffer.
*/ */
#ifndef CONFIG_DMABOUNCE
static inline void static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_single_for_cpu(dev, handle, size, dir);
return;
}
consistent_sync((void *)__bus_to_virt(handle), size, dir); consistent_sync((void *)__bus_to_virt(handle), size, dir);
} }
...@@ -309,13 +295,13 @@ static inline void ...@@ -309,13 +295,13 @@ static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_single_for_device(dev, handle, size, dir);
return;
}
consistent_sync((void *)__bus_to_virt(handle), size, dir); consistent_sync((void *)__bus_to_virt(handle), size, dir);
} }
#else
extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
extern void dma_sync_single_for_device(struct device*, dma_addr_t, size_t, enum dma_data_direction);
#endif
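For a long-lived mapping, the sync pair hands ownership back and forth without remapping. A sketch (names hypothetical), assuming the device has just filled a receive buffer:

static void my_inspect_rx(struct device *dev, dma_addr_t handle,
			  void *buf, size_t len)
{
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* the CPU may now read buf and see what the device wrote */

	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	/* the device again owns the buffer and may DMA into it */
}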
/** /**
* dma_sync_sg_for_cpu * dma_sync_sg_for_cpu
...@@ -330,17 +316,13 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, ...@@ -330,17 +316,13 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
* The same as dma_sync_single_for_* but for a scatter-gather list, * The same as dma_sync_single_for_* but for a scatter-gather list,
* same rules and usage. * same rules and usage.
*/ */
#ifndef CONFIG_DMABOUNCE
static inline void static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
int i; int i;
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_sg_for_cpu(dev, sg, nents, dir);
return;
}
for (i = 0; i < nents; i++, sg++) { for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset; char *virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, dir); consistent_sync(virt, sg->length, dir);
...@@ -353,24 +335,73 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, ...@@ -353,24 +335,73 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
{ {
int i; int i;
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_sg_for_device(dev, sg, nents, dir);
return;
}
for (i = 0; i < nents; i++, sg++) { for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset; char *virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, dir); consistent_sync(virt, sg->length, dir);
} }
} }
#else
extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
#endif
#ifdef CONFIG_DMABOUNCE
/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */
/**
 * dmabounce_register_dev
 *
* @dev: valid struct device pointer
* @small_buf_size: size of buffers to use with small buffer pool
* @large_buf_size: size of buffers to use with large buffer pool (can be 0)
*
* This function should be called by low-level platform code to register
* a device as requiring DMA buffer bouncing. The function will allocate
* appropriate DMA pools for the device.
*
*/
extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
/**
* dmabounce_unregister_dev
*
* @dev: valid struct device pointer
*
* This function should be called by low-level platform code when device
* that was previously registered with dmabounce_register_dev is removed
* from the system.
*
*/
extern void dmabounce_unregister_dev(struct device *);
/**
* dma_needs_bounce
*
* @dev: valid struct device pointer
* @dma_handle: dma_handle of unbounced buffer
* @size: size of region being mapped
*
* Platforms that utilize the dmabounce mechanism must implement
* this function.
*
* The dmabounce routines call this function whenever a dma-mapping
* is requested to determine whether a given buffer needs to be bounced
* or not. The function must return 0 if the buffer is OK for
* DMA access and 1 if the buffer needs to be bounced.
*
*/
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
#endif /* CONFIG_DMABOUNCE */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif #endif
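Putting the three hooks together, here is a hedged sketch of the platform glue this header asks for; the 64MB window, the pool sizes, and all my_board_* names are illustrative rather than taken from any real platform file:

#include <linux/device.h>
#include <linux/dma-mapping.h>

#define MY_BOARD_PCI_WINDOW	(64 * 1024 * 1024)	/* assumed inbound window */

/* called by dmabounce on every mapping: 0 = safe, 1 = bounce it */
int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > MY_BOARD_PCI_WINDOW;
}

static int my_board_add_device(struct device *dev)
{
	/* 512-byte small-buffer pool, 4K large-buffer pool */
	return dmabounce_register_dev(dev, 512, 4096);
}

static void my_board_remove_device(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}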
...@@ -42,6 +42,15 @@ ...@@ -42,6 +42,15 @@
#define CR_XP (1 << 23) /* Extended page tables */ #define CR_XP (1 << 23) /* Extended page tables */
#define CR_VE (1 << 24) /* Vectored interrupts */ #define CR_VE (1 << 24) /* Vectored interrupts */
/*
* This is used to ensure the compiler did actually allocate the register we
* asked it for in some inline assembly sequences. Apparently we can't trust * asked it for in some inline assembly sequences. Apparently we can't trust
* the compiler from one version to another so a bit of paranoia won't hurt.
* This string is meant to be concatenated with the inline asm string and
* will cause compilation to stop on mismatch.
*/
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/kernel.h> #include <linux/kernel.h>
......
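__asmeq() is concatenated with the asm template, so by assembly time %0 has already been substituted with the real register name; .ifnc then fails the build on a mismatch instead of letting a miscompiled call through. A sketch, with my_helper a hypothetical out-of-line routine that takes its argument in r0:

static unsigned int call_my_helper(unsigned int arg)
{
	register unsigned int r0_val asm("r0") = arg;

	asm volatile(__asmeq("%0", "r0")	/* emits: .ifnc r0,r0 ; .err ; .endif */
		     "bl	my_helper"
		     : "+r" (r0_val)
		     :
		     : "ip", "lr", "cc");

	return r0_val;
}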
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/arch/memory.h> #include <asm/arch/memory.h>
#include <asm/domain.h> #include <asm/domain.h>
#include <asm/system.h>
#define VERIFY_READ 0 #define VERIFY_READ 0
#define VERIFY_WRITE 1 #define VERIFY_WRITE 1
...@@ -107,7 +108,9 @@ extern int __get_user_8(void *); ...@@ -107,7 +108,9 @@ extern int __get_user_8(void *);
extern int __get_user_bad(void); extern int __get_user_bad(void);
#define __get_user_x(__r1,__p,__e,__s,__i...) \ #define __get_user_x(__r1,__p,__e,__s,__i...) \
__asm__ __volatile__ ("bl __get_user_" #__s \ __asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%1", "r1") \
"bl __get_user_" #__s \
: "=&r" (__e), "=r" (__r1) \ : "=&r" (__e), "=r" (__r1) \
: "0" (__p) \ : "0" (__p) \
: __i, "cc") : __i, "cc")
...@@ -223,7 +226,9 @@ extern int __put_user_8(void *, unsigned long long); ...@@ -223,7 +226,9 @@ extern int __put_user_8(void *, unsigned long long);
extern int __put_user_bad(void); extern int __put_user_bad(void);
#define __put_user_x(__r1,__p,__e,__s) \ #define __put_user_x(__r1,__p,__e,__s) \
__asm__ __volatile__ ("bl __put_user_" #__s \ __asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%2", "r1") \
"bl __put_user_" #__s \
: "=&r" (__e) \ : "=&r" (__e) \
: "0" (__p), "r" (__r1) \ : "0" (__p), "r" (__r1) \
: "ip", "lr", "cc") : "ip", "lr", "cc")
......
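Call sites are unaffected by the extra checking; get_user()/put_user() still return 0 on success and -EFAULT on a faulting user pointer. A short sketch (my_read_flag is hypothetical):

static int my_read_flag(int __user *uptr, int *out)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
}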
...@@ -215,8 +215,6 @@ typedef unsigned char byte; /* used everywhere */ ...@@ -215,8 +215,6 @@ typedef unsigned char byte; /* used everywhere */
#define SECTOR_SIZE 512 #define SECTOR_SIZE 512
#define SECTOR_WORDS (SECTOR_SIZE / 4) /* number of 32bit words per sector */ #define SECTOR_WORDS (SECTOR_SIZE / 4) /* number of 32bit words per sector */
#define IDE_LARGE_SEEK(b1,b2,t) (((b1) > (b2) + (t)) || ((b2) > (b1) + (t))) #define IDE_LARGE_SEEK(b1,b2,t) (((b1) > (b2) + (t)) || ((b2) > (b1) + (t)))
#define IDE_MIN(a,b) ((a)<(b) ? (a):(b))
#define IDE_MAX(a,b) ((a)>(b) ? (a):(b))
/* /*
* Timeouts for various operations: * Timeouts for various operations:
......
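IDE_MIN()/IDE_MAX() disappear, presumably in favour of the generic helpers in <linux/kernel.h>, which refuse to compare mismatched types; a two-line sketch (my_minmax_demo is illustrative):

static void my_minmax_demo(void)
{
	unsigned long a = 8, b = 5;
	unsigned long lo = min(a, b);			/* type-checked: both unsigned long */
	unsigned int n = min_t(unsigned int, a, 6U);	/* explicit cast when types differ */

	(void)lo;
	(void)n;
}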
...@@ -1383,6 +1383,9 @@ ...@@ -1383,6 +1383,9 @@
#define PCI_DEVICE_ID_SBE_WANXL400 0x0104 #define PCI_DEVICE_ID_SBE_WANXL400 0x0104
#define PCI_VENDOR_ID_TOSHIBA 0x1179 #define PCI_VENDOR_ID_TOSHIBA 0x1179
#define PCI_DEVICE_ID_TOSHIBA_PICCOLO 0x0102
#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1 0x0103
#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_2 0x0105
#define PCI_DEVICE_ID_TOSHIBA_601 0x0601 #define PCI_DEVICE_ID_TOSHIBA_601 0x0601
#define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a #define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a
#define PCI_DEVICE_ID_TOSHIBA_TOPIC95_A 0x0603 #define PCI_DEVICE_ID_TOSHIBA_TOPIC95_A 0x0603
......
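A sketch of how a driver would match the newly named Piccolo parts; the driver and table name are hypothetical, and only the vendor/device constants come from this hunk:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id my_piccolo_tbl[] = {
	{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_1,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, my_piccolo_tbl);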