/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

Linus Torvalds's avatar
Linus Torvalds committed
36
/*
 * Define STATS to collect per-device allocation/bounce counters,
 * exposed via the 'dmabounce_stats' sysfs attribute below.
 */
#undef STATS

#ifdef STATS
/* Wrap statistics updates so they compile away when STATS is off. */
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

/*
 * Book-keeping for one bounced ("safe") buffer: records the driver's
 * original buffer alongside the DMA-safe copy standing in for it.
 */
struct safe_buffer {
	struct list_head node;	/* entry in dmabounce_device_info::safe_buffers */

	/* original request */
	void		*ptr;		/* caller's (unsafe) buffer */
	size_t		size;		/* mapping size in bytes */
	int		direction;	/* enum dma_data_direction of the mapping */

	/* safe buffer info */
	struct dmabounce_pool *pool;	/* pool the copy came from; NULL means
					 * dma_alloc_coherent() was used */
	void		*safe;		/* CPU address of the safe copy */
	dma_addr_t	safe_dma_addr;	/* bus address of the safe copy */
};

Russell King's avatar
Russell King committed
60 61 62 63 64 65 66 67
struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

Linus Torvalds's avatar
Linus Torvalds committed
68 69 70 71 72 73 74
/*
 * Per-device dmabounce state, installed in dev->archdata.dmabounce by
 * dmabounce_register_dev().
 */
struct dmabounce_device_info {
	struct device *dev;		/* the device we bounce for */
	struct list_head safe_buffers;	/* live safe_buffer entries */
#ifdef STATS
	unsigned long total_allocs;	/* all bounce allocations */
	unsigned long map_op_count;	/* map_single() invocations */
	unsigned long bounce_count;	/* copies performed */
	int attr_res;			/* device_create_file() result; 0 means
					 * the sysfs attribute must be removed
					 * on unregister */
#endif
	struct dmabounce_pool	small;	/* pool for small requests */
	struct dmabounce_pool	large;	/* pool for medium requests */

	rwlock_t lock;			/* protects safe_buffers list */
};

#ifdef STATS
84 85
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
Linus Torvalds's avatar
Linus Torvalds committed
86
{
87 88 89 90
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
Russell King's avatar
Russell King committed
91 92
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
93 94 95
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
Linus Torvalds's avatar
Linus Torvalds committed
96
}
97 98

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
Linus Torvalds's avatar
Linus Torvalds committed
99 100 101 102 103 104
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
Russell King's avatar
Russell King committed
105
		  size_t size, enum dma_data_direction dir)
Linus Torvalds's avatar
Linus Torvalds committed
106 107
{
	struct safe_buffer *buf;
Russell King's avatar
Russell King committed
108
	struct dmabounce_pool *pool;
Linus Torvalds's avatar
Linus Torvalds committed
109
	struct device *dev = device_info->dev;
110
	unsigned long flags;
Linus Torvalds's avatar
Linus Torvalds committed
111 112 113 114

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

Russell King's avatar
Russell King committed
115 116 117 118 119 120 121
	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}
Linus Torvalds's avatar
Linus Torvalds committed
122 123 124 125 126 127 128

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

Russell King's avatar
Russell King committed
129 130 131 132
	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;
Linus Torvalds's avatar
Linus Torvalds committed
133

Russell King's avatar
Russell King committed
134 135 136
	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
Linus Torvalds's avatar
Linus Torvalds committed
137
	} else {
Russell King's avatar
Russell King committed
138 139
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
Linus Torvalds's avatar
Linus Torvalds committed
140 141
	}

Russell King's avatar
Russell King committed
142 143 144 145
	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
Linus Torvalds's avatar
Linus Torvalds committed
146 147 148 149 150
		kfree(buf);
		return NULL;
	}

#ifdef STATS
Russell King's avatar
Russell King committed
151 152 153
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
Linus Torvalds's avatar
Linus Torvalds committed
154 155
#endif

156
	write_lock_irqsave(&device_info->lock, flags);
Linus Torvalds's avatar
Linus Torvalds committed
157
	list_add(&buf->node, &device_info->safe_buffers);
158 159
	write_unlock_irqrestore(&device_info->lock, flags);

Linus Torvalds's avatar
Linus Torvalds committed
160 161 162 163 164 165 166
	return buf;
}

/*
 * Determine whether @safe_dma_addr belongs to one of our bounce
 * buffers.  Returns the matching safe_buffer, or NULL if the address
 * was never bounced.  Walks the list under the read lock.
 */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *buf, *found = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(buf, &device_info->safe_buffers, node) {
		if (buf->safe_dma_addr == safe_dma_addr) {
			found = buf;
			break;
		}
	}

	read_unlock_irqrestore(&device_info->lock, flags);

	return found;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
185 186
	unsigned long flags;

Linus Torvalds's avatar
Linus Torvalds committed
187 188
	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

189 190
	write_lock_irqsave(&device_info->lock, flags);

Linus Torvalds's avatar
Linus Torvalds committed
191 192
	list_del(&buf->node);

193 194
	write_unlock_irqrestore(&device_info->lock, flags);

Linus Torvalds's avatar
Linus Torvalds committed
195
	if (buf->pool)
Russell King's avatar
Russell King committed
196
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
Linus Torvalds's avatar
Linus Torvalds committed
197 198 199 200 201 202 203 204 205
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				    buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

206 207 208 209 210 211 212 213 214 215 216 217 218 219 220
/*
 * Look up the safe_buffer backing @dma_addr on @dev, if any.  Returns
 * NULL when the device is not registered with dmabounce, when the
 * address is an error cookie (logged via dev_err), or when the address
 * was never bounced.
 */
static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		/*
		 * dev is known non-NULL here (guard above), so the old
		 * "unknown device" pr_err() branch was dead code.
		 */
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

221
/*
 * Map @ptr for DMA, bouncing it through a safe buffer when it falls
 * outside the device's DMA mask (or when dma_needs_bounce() says so).
 * Returns the bus address to hand to the device, or ~0 on failure.
 *
 * Fixes vs. original: the allocation-failure path returned 0, which is
 * indistinguishable from a valid bus address; both error paths now
 * return the same ~0 cookie.  size_t is printed with %zx, not %x.
 */
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		/*
		 * limit is non-zero only for masks of the form 2^n - 1
		 * smaller than the address space; it is the largest
		 * contiguous mapping the mask can cover.
		 */
		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#zx "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == NULL) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			/*
			 * Use the same ~0 error cookie as the "too big"
			 * path above; 0 could be a valid bus address.
			 */
			return ~0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		/* device will read the buffer: populate the safe copy now */
		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		dma_cache_maint(ptr, size, dir);
	}

	return dma_addr;
}

285 286
/*
 * Undo a (possibly bounced) mapping: if @dma_addr refers to one of our
 * safe buffers, copy DMA_FROM_DEVICE data back to the original buffer,
 * restore cache coherency for the original buffer, and release the
 * safe buffer.  Unbounced mappings need no work here.
 */
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		/* mapping must be torn down exactly as it was set up */
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	}
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
332
/* Bounce-aware dma_map_single(); size_t now printed with %zu. */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);
Linus Torvalds's avatar
Linus Torvalds committed
343

344
/*
 * Bounce-aware dma_map_page(): resolve the page to a kernel virtual
 * address and defer to map_single().
 * NOTE(review): page_address() only works for lowmem pages — confirm
 * callers never pass highmem pages here.
 */
dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	void *kaddr;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	kaddr = page_address(page) + offset;
	return map_single(dev, kaddr, size, dir);
}
EXPORT_SYMBOL(dma_map_page);

Linus Torvalds's avatar
Linus Torvalds committed
356 357 358 359 360 361 362
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */

363 364
/* Bounce-aware dma_unmap_single(); size_t now printed with %zu. */
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);
Linus Torvalds's avatar
Linus Torvalds committed
372

373 374
/*
 * Partial-sync (CPU direction): copy device-written data from the safe
 * buffer back into the caller's buffer for the [off, off+sz) window.
 * Returns 0 if the address was a bounce buffer (fully handled here),
 * 1 if it was not bounced and the generic sync path must run.
 * size_t is now printed with %zu (was %d).
 */
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %zu\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);
Linus Torvalds's avatar
Linus Torvalds committed
401

402 403
/*
 * Partial-sync (device direction): copy fresh CPU data from the
 * caller's buffer into the safe buffer for the [off, off+sz) window.
 * Returns 0 if the address was a bounce buffer (fully handled here),
 * 1 if it was not bounced and the generic sync path must run.
 * size_t is now printed with %zu (was %d); fixed missing space after
 * the __func__ argument.
 */
int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %zu\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);
Linus Torvalds's avatar
Linus Torvalds committed
430

431 432
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
Russell King's avatar
Russell King committed
433 434 435 436 437 438 439 440 441 442
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

443 444
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
Linus Torvalds's avatar
Linus Torvalds committed
445 446
{
	struct dmabounce_device_info *device_info;
Russell King's avatar
Russell King committed
447
	int ret;
Linus Torvalds's avatar
Linus Torvalds committed
448 449 450

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
451 452
		dev_err(dev,
			"Could not allocated dmabounce_device_info\n");
Linus Torvalds's avatar
Linus Torvalds committed
453 454 455
		return -ENOMEM;
	}

Russell King's avatar
Russell King committed
456 457 458 459 460 461 462
	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
Linus Torvalds's avatar
Linus Torvalds committed
463 464 465
	}

	if (large_buffer_size) {
Russell King's avatar
Russell King committed
466 467 468 469 470 471 472 473
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
Linus Torvalds's avatar
Linus Torvalds committed
474 475 476 477 478
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
479
	rwlock_init(&device_info->lock);
Linus Torvalds's avatar
Linus Torvalds committed
480 481 482 483 484

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
485
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
Linus Torvalds's avatar
Linus Torvalds committed
486 487
#endif

488
	dev->archdata.dmabounce = device_info;
Linus Torvalds's avatar
Linus Torvalds committed
489

490
	dev_info(dev, "dmabounce: registered device\n");
Linus Torvalds's avatar
Linus Torvalds committed
491 492

	return 0;
Russell King's avatar
Russell King committed
493 494 495 496 497 498

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
Linus Torvalds's avatar
Linus Torvalds committed
499
}
500
EXPORT_SYMBOL(dmabounce_register_dev);
Linus Torvalds's avatar
Linus Torvalds committed
501

502
void dmabounce_unregister_dev(struct device *dev)
Linus Torvalds's avatar
Linus Torvalds committed
503
{
504 505 506
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
Linus Torvalds's avatar
Linus Torvalds committed
507 508

	if (!device_info) {
509 510 511
		dev_warn(dev,
			 "Never registered with dmabounce but attempting"
			 "to unregister!\n");
Linus Torvalds's avatar
Linus Torvalds committed
512 513 514 515
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
516 517
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
Linus Torvalds's avatar
Linus Torvalds committed
518 519 520
		BUG();
	}

Russell King's avatar
Russell King committed
521 522 523 524
	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);
Linus Torvalds's avatar
Linus Torvalds committed
525 526

#ifdef STATS
527 528
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
Linus Torvalds's avatar
Linus Torvalds committed
529 530 531 532
#endif

	kfree(device_info);

533
	dev_info(dev, "dmabounce: device unregistered\n");
Linus Torvalds's avatar
Linus Torvalds committed
534 535 536 537 538 539
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");