/*
 * Dynamic DMA mapping support.
 *
 * This implementation is for IA-64 platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 *
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 */

#include <linux/cache.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/pci.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/bootmem.h>

#define OFFSET(val, align)	((unsigned long) ((val) & ((align) - 1)))

#define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * Log of the size of each IO TLB slab.  The number of slabs is command line controllable.
 */
#define IO_TLB_SHIFT 11

/*
 * Used to do a quick range check in swiotlb_unmap_single and swiotlb_sync_single, to see
 * if the memory was in fact allocated by this API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB slabs between io_tlb_start and io_tlb_end.  This is
 * command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs = 1024;

/*
 * This is a free list describing the number of free entries available from each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry for the sync
 * operations.
 */
static unsigned char **io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static spinlock_t io_tlb_lock = SPIN_LOCK_UNLOCKED;

static int __init
setup_io_tlb_npages (char *str)
{
	io_tlb_nslabs = simple_strtoul(str, NULL, 0) << (PAGE_SHIFT - IO_TLB_SHIFT);

	/* avoid tail segment of size < IO_TLB_SEGSIZE */
	io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);

	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
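
/*
 * Worked example (illustrative, not from the original source): the argument
 * counts pages.  Assuming 16 KB pages (PAGE_SHIFT == 14) and IO_TLB_SHIFT ==
 * 11, booting with "swiotlb=128" yields io_tlb_nslabs = 128 << (14 - 11) =
 * 1024 slabs, i.e. 2 MB of bounce-buffer space.
 */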

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data structures for
 * the software IO TLB used to implement the PCI DMA API.
 */
void
swiotlb_init (void)
{
	int i;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
	if (!io_tlb_start)
		BUG();
	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
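	/*
	 * Illustration (assuming IO_TLB_SEGSIZE == 128): the loop above sets
	 * io_tlb_list to 128, 127, ..., 2, 1, 128, 127, ..., so each entry
	 * holds the number of contiguous free slabs from that slot to the
	 * end of its segment.
	 */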
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));

	printk(KERN_INFO "Placing software IO TLB between 0x%p - 0x%p\n",
	       (void *) io_tlb_start, (void *) io_tlb_end);
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;

	/*
	 * For mappings greater than a page size, we limit the stride (and hence alignment)
	 * to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > (1 << PAGE_SHIFT))
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	if (!nslots)
		BUG();

	/*
	 * Find a suitable number of IO TLB entries that will fit this request and
	 * allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		wrap = index = ALIGN(io_tlb_index, stride);

		if (index >= io_tlb_nslabs)
			wrap = index = 0;

		do {
			/*
			 * If we find a slot that indicates we have 'nslots' contiguous
			 * buffers, we allocate the buffers from that slot and mark the
			 * entries as '0' to indicate that they are unavailable.
			 */
			if (io_tlb_list[index] >= nslots) {
				int count = 0;

				for (i = index; i < index + nslots; i++)
					io_tlb_list[i] = 0;
				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1)
				       && io_tlb_list[i]; i--)
					io_tlb_list[i] = ++count;
				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

				/*
				 * Update the indices to avoid searching in the next round.
				 */
				io_tlb_index = ((index + nslots) < io_tlb_nslabs
						? (index + nslots) : 0);

				goto found;
			}
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
		} while (index != wrap);

		/*
		 * XXX What is a suitable recovery mechanism here?  We cannot
		 * sleep because we may be called from interrupt context!
		 */
		panic("map_single: could not allocate software IO TLB (%ld bytes)", size);
	}
  found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.  This is
	 * needed when we sync the memory.  Then we sync the buffer if needed.
	 */
	io_tlb_orig_addr[index] = buffer;
	if (direction == PCI_DMA_TODEVICE || direction == PCI_DMA_BIDIRECTIONAL)
		memcpy(dma_addr, buffer, size);

	return dma_addr;
}
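
/*
 * Worked example (illustrative): with IO_TLB_SHIFT == 11, a 6 KB request
 * needs nslots = ALIGN(6144, 2048) >> 11 = 3 slabs.  The search above looks
 * for an index whose free count is >= 3, zeroes those three entries, and
 * renumbers any free predecessors in the same IO_TLB_SEGSIZE segment so
 * they still record the distance to the next allocated slot.
 */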

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
{
	unsigned long flags;
	int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if ((direction == PCI_DMA_FROMDEVICE) || (direction == PCI_DMA_BIDIRECTIONAL))
		/*
		 * bounce... copy the data back into the original buffer and delete the
		 * bounce buffer.
		 */
		memcpy(buffer, dma_addr, size);

	/*
	 * Return the buffer to the free list by setting the corresponding entries to
	 * indicate the number of contiguous entries available.  While returning the
	 * entries to the free list, we merge the entries with slots below and above the
	 * pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		int count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			     io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the slots with
		 * succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots, if
		 * available (non-zero)
		 */
		for (i = index - 1;  (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
		       io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
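
/*
 * Illustration (hypothetical state): freeing 3 slabs at index 10 when
 * io_tlb_list[13] == 4 starts the count at 4, so slots 12, 11 and 10 become
 * 5, 6 and 7; free slots below index 10 in the same segment are then
 * renumbered upwards the same way, restoring contiguous free counts.
 */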

static void
sync_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	/*
	 * bounce... copy the data back into/from the original buffer
	 * XXX How do you handle PCI_DMA_BIDIRECTIONAL here?
	 */
	if (direction == PCI_DMA_FROMDEVICE)
		memcpy(buffer, dma_addr, size);
	else if (direction == PCI_DMA_TODEVICE)
		memcpy(dma_addr, buffer, size);
	else
		BUG();
}

void *
swiotlb_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
	unsigned long pci_addr;
	int gfp = GFP_ATOMIC;
	void *ret;

	/*
	 * Alloc_consistent() is defined to return memory < 4GB, no matter what the DMA
	 * mask says.
	 */
	gfp |= GFP_DMA; /* XXX fix me: should change this to GFP_32BIT or ZONE_32BIT */
	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	pci_addr = virt_to_phys(ret);
	if (hwdev && (pci_addr & ~hwdev->dma_mask) != 0)
		panic("swiotlb_alloc_consistent: allocated memory is out of range for PCI device");
	*dma_handle = pci_addr;
	return ret;
}

void
swiotlb_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}
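
/*
 * Example (hypothetical driver usage, not part of this file): allocating a
 * 4 KB descriptor ring that the device addresses through `ring_dma'.  The
 * names `pdev', `ring' and `ring_dma' are placeholders.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = swiotlb_alloc_consistent(pdev, 4096, &ring_dma);
 *	...
 *	swiotlb_free_consistent(pdev, 4096, ring, ring_dma);
 */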

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The PCI address
 * to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until either
 * swiotlb_unmap_single or swiotlb_sync_single is performed.
 */
dma_addr_t
swiotlb_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
	unsigned long pci_addr = virt_to_phys(ptr);

	if (direction == PCI_DMA_NONE)
		BUG();
	/*
	 * Check if the PCI device can DMA to ptr... if so, just return ptr
	 */
	if ((pci_addr & ~hwdev->dma_mask) == 0)
		/*
		 * Device is capable of DMA'ing to the buffer... just return the PCI
		 * address of ptr
		 */
		return pci_addr;

	/*
	 * get a bounce buffer:
	 */
	pci_addr = virt_to_phys(map_single(hwdev, ptr, size, direction));

	/*
	 * Ensure that the address returned is DMA'ble:
	 */
	if ((pci_addr & ~hwdev->dma_mask) != 0)
		panic("map_single: bounce buffer is not DMA'ble");

	return pci_addr;
}
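
/*
 * Example (hypothetical driver usage): mapping a buffer for a device-bound
 * transfer.  `pdev', `buf' and `len' are placeholders supplied by the caller.
 *
 *	dma_addr_t bus = swiotlb_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	... start the DMA and wait for it to complete ...
 *	swiotlb_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);
 */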

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that update_mmu_cache() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must match what
 * was provided for in a previous swiotlb_map_single call.  All other usages are
 * undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see whatever the
 * device wrote there.
 */
void
swiotlb_unmap_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction)
{
	char *dma_addr = phys_to_virt(pci_addr);

	if (direction == PCI_DMA_NONE)
		BUG();
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		unmap_single(hwdev, dma_addr, size, direction);
	else if (direction == PCI_DMA_FROMDEVICE)
		mark_clean(dma_addr, size);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation after a
 * transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer using the cpu,
 * yet do not wish to tear down the PCI dma mapping, you must call this function before
 * doing so.  At the next point you give the PCI dma address back to the card, the device
 * again owns the buffer.
 */
void
swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction)
{
	char *dma_addr = phys_to_virt(pci_addr);

	if (direction == PCI_DMA_NONE)
		BUG();
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		sync_single(hwdev, dma_addr, size, direction);
	else if (direction == PCI_DMA_FROMDEVICE)
		mark_clean(dma_addr, size);
}
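
/*
 * Example (hypothetical): inspecting received data without tearing down the
 * mapping.  `pdev', `buf', `len' and `bus' are placeholders.
 *
 *	bus = swiotlb_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
 *	... device writes into the buffer ...
 *	swiotlb_sync_single(pdev, bus, len, PCI_DMA_FROMDEVICE);
 *	... the cpu may now read buf; the device owns the buffer again the
 *	    next time `bus' is handed back to it ...
 */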

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.  This is the
 * scatter-gather version of the above swiotlb_map_single interface.  Here the scatter-
 * gather list elements are each tagged with the appropriate dma address and length.  They
 * are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the same here.
 */
int
swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
{
	void *addr;
	unsigned long pci_addr;
	int i;

	if (direction == PCI_DMA_NONE)
		BUG();

	for (i = 0; i < nelems; i++, sg++) {
		addr = SG_ENT_VIRT_ADDRESS(sg);
		pci_addr = virt_to_phys(addr);
		if ((pci_addr & ~hwdev->dma_mask) != 0)
			sg->dma_address = (dma_addr_t)
				map_single(hwdev, addr, sg->length, direction);
		else
			sg->dma_address = pci_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
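
/*
 * Example (hypothetical): mapping a scatter-gather table for a transfer.
 * `pdev', `sglist' and `nents' are placeholders; `program_hw_segment' stands
 * in for whatever the driver does with each bus address/length pair.
 *
 *	int i, n = swiotlb_map_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
 *	for (i = 0; i < n; i++)
 *		program_hw_segment(sg_dma_address(&sglist[i]),
 *				   sg_dma_length(&sglist[i]));
 *	...
 *	swiotlb_unmap_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
 */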

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules concerning calls
 * here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
		BUG();

	for (i = 0; i < nelems; i++, sg++)
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			unmap_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction);
		else if (direction == PCI_DMA_FROMDEVICE)
			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations after a
 * transfer.
 *
 * The same as swiotlb_sync_single but for a scatter-gather list, same rules and
 * usage.
 */
void
swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
{
	int i;

	if (direction == PCI_DMA_NONE)
		BUG();

	for (i = 0; i < nelems; i++, sg++)
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction);
}

unsigned long
swiotlb_dma_address (struct scatterlist *sg)
{
	return sg->dma_address;
}

/*
 * Return whether the given PCI device DMA address mask can be supported properly.  For
 * example, if your device can only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
int
swiotlb_pci_dma_supported (struct pci_dev *hwdev, u64 mask)
{
	return 1;
}

EXPORT_SYMBOL(swiotlb_init);
EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single);
EXPORT_SYMBOL(swiotlb_sync_sg);
EXPORT_SYMBOL(swiotlb_dma_address);
EXPORT_SYMBOL(swiotlb_alloc_consistent);
EXPORT_SYMBOL(swiotlb_free_consistent);
EXPORT_SYMBOL(swiotlb_pci_dma_supported);