Commit a75f8710 authored by Jesse Barnes, committed by Tony Luck

[IA64] swiotlb.c: long line, whitespace, and other cleanup

Clean up swiotlb.c a bit in preparation for some other changes that might be 
coming (e.g. moving it to the top level lib/ directory, adding support for 
more uses).  Mostly whitespace and long line fixes, along with a few printk 
fixes.
Signed-off-by: Jesse Barnes <jbarnes@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 27b9590e
@@ -44,41 +44,44 @@
 #define IO_TLB_SEGSIZE 128

 /*
- * log of the size of each IO TLB slab. The number of slabs is command line controllable.
+ * log of the size of each IO TLB slab. The number of slabs is command line
+ * controllable.
  */
 #define IO_TLB_SHIFT 11

 int swiotlb_force;

 /*
- * Used to do a quick range check in swiotlb_unmap_single and swiotlb_sync_single_*, to see
- * if the memory was in fact allocated by this API.
+ * Used to do a quick range check in swiotlb_unmap_single and
+ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
+ * API.
  */
 static char *io_tlb_start, *io_tlb_end;

 /*
- * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and io_tlb_end.
- * This is command line adjustable via setup_io_tlb_npages.
+ * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and
+ * io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
  * Default to 64MB.
  */
 static unsigned long io_tlb_nslabs = 32768;

 /*
  * When the IOMMU overflows we return a fallback buffer. This sets the size.
  */
 static unsigned long io_tlb_overflow = 32*1024;

 void *io_tlb_overflow_buffer;

 /*
- * This is a free list describing the number of free entries available from each index
+ * This is a free list describing the number of free entries available from
+ * each index
  */
 static unsigned int *io_tlb_list;
 static unsigned int io_tlb_index;

 /*
- * We need to save away the original address corresponding to a mapped entry for the sync
- * operations.
+ * We need to save away the original address corresponding to a mapped entry
+ * for the sync operations.
  */
 static unsigned char **io_tlb_orig_addr;
@@ -88,10 +91,11 @@ static unsigned char **io_tlb_orig_addr;
 static spinlock_t io_tlb_lock = SPIN_LOCK_UNLOCKED;

 static int __init
-setup_io_tlb_npages (char *str)
+setup_io_tlb_npages(char *str)
 {
 	if (isdigit(*str)) {
-		io_tlb_nslabs = simple_strtoul(str, &str, 0) << (PAGE_SHIFT - IO_TLB_SHIFT);
+		io_tlb_nslabs = simple_strtoul(str, &str, 0) <<
+			(PAGE_SHIFT - IO_TLB_SHIFT);
 		/* avoid tail segment of size < IO_TLB_SEGSIZE */
 		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
 	}
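Side note for reviewers: the shift here converts a page count from the `swiotlb=` boot option into 2KB slabs. A minimal userspace sketch of that arithmetic, assuming 16KB ia64 pages (PAGE_SHIFT = 14) and reimplementing the kernel's ALIGN() locally:

```c
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	14	/* assumption: 16KB ia64 pages */
#define IO_TLB_SHIFT	11	/* 2KB slabs, as defined above */
#define IO_TLB_SEGSIZE	128
/* local stand-in for the kernel's ALIGN(): round x up to a multiple of a */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	/* "swiotlb=16384" requests 16384 pages of bounce space */
	unsigned long pages = strtoul("16384", NULL, 0);
	/* each page holds 1 << (PAGE_SHIFT - IO_TLB_SHIFT) = 8 slabs */
	unsigned long nslabs = pages << (PAGE_SHIFT - IO_TLB_SHIFT);

	/* avoid a tail segment smaller than IO_TLB_SEGSIZE */
	nslabs = ALIGN(nslabs, IO_TLB_SEGSIZE);
	printf("nslabs = %lu (%lu MB of bounce buffers)\n",
	       nslabs, (nslabs << IO_TLB_SHIFT) >> 20);
	return 0;
}
```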
@@ -105,18 +109,19 @@ __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */

 /*
- * Statically reserve bounce buffer space and initialize bounce buffer data structures for
- * the software IO TLB used to implement the PCI DMA API.
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the PCI DMA API.
  */
 void
-swiotlb_init (void)
+swiotlb_init(void)
 {
 	unsigned long i;

 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
+					       (1 << IO_TLB_SHIFT));
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
@@ -131,28 +136,30 @@ swiotlb_init (void)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
 	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));

 	/*
 	 * Get the overflow emergency buffer
 	 */
 	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
 	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
 	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
 }

-static inline int address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+static inline int
+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 {
 	dma_addr_t mask = 0xffffffff;
+	/* If the device has a mask, use it, otherwise default to 32 bits */
 	if (hwdev && hwdev->dma_mask)
 		mask = *hwdev->dma_mask;
 	return (addr & ~mask) != 0;
 }
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 static void *
-map_single (struct device *hwdev, char *buffer, size_t size, int dir)
+map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 {
 	unsigned long flags;
 	char *dma_addr;
@@ -160,11 +167,11 @@ map_single (struct device *hwdev, char *buffer, size_t size, int dir)
 	int i;

 	/*
-	 * For mappings greater than a page size, we limit the stride (and hence alignment)
-	 * to a page size.
+	 * For mappings greater than a page, we limit the stride (and
+	 * hence alignment) to a page size.
 	 */
 	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	if (size > (1 << PAGE_SHIFT))
+	if (size > PAGE_SIZE)
 		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
 	else
 		stride = 1;
...@@ -173,8 +180,8 @@ map_single (struct device *hwdev, char *buffer, size_t size, int dir) ...@@ -173,8 +180,8 @@ map_single (struct device *hwdev, char *buffer, size_t size, int dir)
BUG(); BUG();
/* /*
* Find suitable number of IO TLB entries size that will fit this request and * Find suitable number of IO TLB entries size that will fit this
* allocate a buffer from that IO TLB pool. * request and allocate a buffer from that IO TLB pool.
*/ */
spin_lock_irqsave(&io_tlb_lock, flags); spin_lock_irqsave(&io_tlb_lock, flags);
{ {
@@ -185,22 +192,23 @@ map_single (struct device *hwdev, char *buffer, size_t size, int dir)
 		do {
 			/*
-			 * If we find a slot that indicates we have 'nslots' number of
-			 * contiguous buffers, we allocate the buffers from that slot and
-			 * mark the entries as '0' indicating unavailable.
+			 * If we find a slot that indicates we have 'nslots'
+			 * number of contiguous buffers, we allocate the
+			 * buffers from that slot and mark the entries as '0'
+			 * indicating unavailable.
 			 */
 			if (io_tlb_list[index] >= nslots) {
 				int count = 0;

 				for (i = index; i < (int) (index + nslots); i++)
 					io_tlb_list[i] = 0;
-				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1)
-				       && io_tlb_list[i]; i--)
+				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
 					io_tlb_list[i] = ++count;
 				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

 				/*
-				 * Update the indices to avoid searching in the next round.
+				 * Update the indices to avoid searching in
+				 * the next round.
 				 */
 				io_tlb_index = ((index + nslots) < io_tlb_nslabs
 						? (index + nslots) : 0);
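The invariant behind this loop: io_tlb_list[i] holds the number of contiguous free slabs starting at slot i, so a single array read answers "do nslots fit here?". A toy userspace sketch of just the claim step, treating a 16-slot pool as one segment (the real code also stops the backward walk at IO_TLB_SEGSIZE boundaries via OFFSET()):

```c
#include <stdio.h>

#define NSLOTS 16	/* toy pool; the kernel tracks io_tlb_nslabs slots */

/* list[i] = number of contiguous free slots starting at slot i */
static unsigned int list[NSLOTS];

/* claim nslots slots at index, mirroring the allocation step above */
static void claim(int index, int nslots)
{
	int i, count = 0;

	/* mark the allocated range as unavailable */
	for (i = index; i < index + nslots; i++)
		list[i] = 0;
	/* free slots before the block now only reach up to 'index' */
	for (i = index - 1; i >= 0 && list[i]; i--)
		list[i] = ++count;
}

int main(void)
{
	int i;

	for (i = 0; i < NSLOTS; i++)	/* empty pool: slot i sees NSLOTS-i */
		list[i] = NSLOTS - i;
	claim(4, 3);			/* take slots 4..6 */
	for (i = 0; i < NSLOTS; i++)
		printf("%u ", list[i]);	/* 4 3 2 1 0 0 0 9 8 7 6 5 4 3 2 1 */
	printf("\n");
	return 0;
}
```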
@@ -219,8 +227,9 @@ map_single (struct device *hwdev, char *buffer, size_t size, int dir)
 	spin_unlock_irqrestore(&io_tlb_lock, flags);

 	/*
-	 * Save away the mapping from the original address to the DMA address. This is
-	 * needed when we sync the memory. Then we sync the buffer if needed.
+	 * Save away the mapping from the original address to the DMA address.
+	 * This is needed when we sync the memory. Then we sync the buffer if
+	 * needed.
 	 */
 	io_tlb_orig_addr[index] = buffer;
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
@@ -233,10 +242,10 @@ map_single (struct device *hwdev, char *buffer, size_t size, int dir)
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 static void
-unmap_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
+unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 {
 	unsigned long flags;
-	int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	char *buffer = io_tlb_orig_addr[index];
@@ -245,40 +254,39 @@ unmap_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
 	 */
 	if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
 		/*
-		 * bounce... copy the data back into the original buffer * and delete the
-		 * bounce buffer.
+		 * bounce... copy the data back into the original buffer * and
+		 * delete the bounce buffer.
 		 */
 		memcpy(buffer, dma_addr, size);

 	/*
-	 * Return the buffer to the free list by setting the corresponding entries to
-	 * indicate the number of contigous entries available. While returning the
-	 * entries to the free list, we merge the entries with slots below and above the
-	 * pool being returned.
+	 * Return the buffer to the free list by setting the corresponding
+	 * entries to indicate the number of contigous entries available.
+	 * While returning the entries to the free list, we merge the entries
+	 * with slots below and above the pool being returned.
 	 */
 	spin_lock_irqsave(&io_tlb_lock, flags);
 	{
-		int count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
-			     io_tlb_list[index + nslots] : 0);
+		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
+			 io_tlb_list[index + nslots] : 0);

 		/*
-		 * Step 1: return the slots to the free list, merging the slots with
-		 * superceeding slots
+		 * Step 1: return the slots to the free list, merging the
+		 * slots with superceeding slots
 		 */
 		for (i = index + nslots - 1; i >= index; i--)
 			io_tlb_list[i] = ++count;
 		/*
-		 * Step 2: merge the returned slots with the preceding slots, if
-		 * available (non zero)
+		 * Step 2: merge the returned slots with the preceding slots,
+		 * if available (non zero)
 		 */
-		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) &&
-		       io_tlb_list[i]; i--)
+		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
 			io_tlb_list[i] = ++count;
 	}
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
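The release path restores the same invariant from the other side: `count` is seeded from the free run that starts just past the freed block, then carried backward through the freed slots and any free slots in front of them, merging all three runs into one. Continuing the toy pool from the sketch above (same single-segment caveat):

```c
/* release nslots slots at index, mirroring unmap_single() above */
static void release(int index, int nslots)
{
	int i;
	/* seed from the free run (if any) just past the freed block */
	int count = (index + nslots < NSLOTS) ? list[index + nslots] : 0;

	/* Step 1: freed slots pick up increasing free-run lengths */
	for (i = index + nslots - 1; i >= index; i--)
		list[i] = ++count;
	/* Step 2: extend the run through free slots preceding the block */
	for (i = index - 1; i >= 0 && list[i]; i--)
		list[i] = ++count;
}

/* after claim(4, 3) above, release(4, 3) restores 16 15 14 ... 2 1 */
```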
 static void
-sync_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
+sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 {
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	char *buffer = io_tlb_orig_addr[index];
@@ -296,13 +304,18 @@ sync_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
 }

 void *
-swiotlb_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, int flags)
+swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+		       dma_addr_t *dma_handle, int flags)
 {
 	unsigned long dev_addr;
 	void *ret;
 	int order = get_order(size);

-	/* XXX fix me: the DMA API should pass us an explicit DMA mask instead: */
+	/*
+	 * XXX fix me: the DMA API should pass us an explicit DMA mask
+	 * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
+	 * bit range instead of a 16MB one).
+	 */
 	flags |= GFP_DMA;

 	ret = (void *)__get_free_pages(flags, order);
@@ -331,14 +344,21 @@ swiotlb_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handl
 	memset(ret, 0, size);
 	dev_addr = virt_to_phys(ret);

-	if (address_needs_mapping(hwdev,dev_addr))
-		panic("swiotlb_alloc_consistent: allocated memory is out of range for device");
+	/* Confirm address can be DMA'd by device */
+	if (address_needs_mapping(hwdev, dev_addr)) {
+		printk("hwdev DMA mask = 0x%016lx, dev_addr = 0x%016lx\n",
+		       *hwdev->dma_mask, dev_addr);
+		panic("swiotlb_alloc_coherent: allocated memory is out of "
+		      "range for device");
+	}
 	*dma_handle = dev_addr;
 	return ret;
 }

 void
-swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+		      dma_addr_t dma_handle)
 {
 	if (!(vaddr >= (void *)io_tlb_start
 	      && vaddr < (void *)io_tlb_end))
@@ -348,66 +368,63 @@ swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_
 		swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
 }

-static void swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+static void
+swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 {
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
 	 * Unfortunately the drivers cannot handle this operation properly.
 	 * unless they check for pci_dma_mapping_error (most don't)
 	 * When the mapping is small enough return a static buffer to limit
 	 * the damage, or panic when the transfer is too big.
 	 */
-	printk(KERN_ERR
-	       "PCI-DMA: Out of SW-IOMMU space for %lu bytes at device %s\n",
-	       size, dev ? dev->bus_id : "?");
+	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
+	       "device %s\n", size, dev ? dev->bus_id : "?");

 	if (size > io_tlb_overflow && do_panic) {
 		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
 			panic("PCI-DMA: Memory would be corrupted\n");
 		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
 			panic("PCI-DMA: Random memory would be DMAed\n");
 	}
 }

 /*
- * Map a single buffer of the indicated size for DMA in streaming mode. The PCI address
- * to use is returned.
+ * Map a single buffer of the indicated size for DMA in streaming mode. The
+ * PCI address to use is returned.
  *
- * Once the device is given the dma address, the device owns this memory until either
- * swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
+ * Once the device is given the dma address, the device owns this memory until
+ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
  */
 dma_addr_t
-swiotlb_map_single (struct device *hwdev, void *ptr, size_t size, int dir)
+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
 	unsigned long dev_addr = virt_to_phys(ptr);
 	void *map;

 	if (dir == DMA_NONE)
 		BUG();
 	/*
-	 * Check if the PCI device can DMA to ptr... if so, just return ptr
+	 * If the pointer passed in happens to be in the device's DMA window,
+	 * we can safely return the device addr and not worry about bounce
+	 * buffering it.
 	 */
 	if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
-		/*
-		 * Device is bit capable of DMA'ing to the buffer... just return the PCI
-		 * address of ptr
-		 */
 		return dev_addr;

 	/*
-	 * get a bounce buffer:
+	 * Oh well, have to allocate and map a bounce buffer.
 	 */
 	map = map_single(hwdev, ptr, size, dir);
 	if (!map) {
 		swiotlb_full(hwdev, size, dir, 1);
 		map = io_tlb_overflow_buffer;
 	}

 	dev_addr = virt_to_phys(map);

 	/*
-	 * Ensure that the address returned is DMA'ble:
+	 * Ensure that the address returned is DMA'ble
 	 */
 	if (address_needs_mapping(hwdev, dev_addr))
 		panic("map_single: bounce buffer is not DMA'ble");
@@ -421,7 +438,7 @@ swiotlb_map_single (struct device *hwdev, void *ptr, size_t size, int dir)
  * flush them when they get mapped into an executable vm-area.
  */
 static void
-mark_clean (void *addr, size_t size)
+mark_clean(void *addr, size_t size)
 {
 	unsigned long pg_addr, end;
@@ -435,15 +452,16 @@ mark_clean (void *addr, size_t size)
 }

 /*
- * Unmap a single streaming mode DMA translation. The dma_addr and size must match what
- * was provided for in a previous swiotlb_map_single call. All other usages are
- * undefined.
+ * Unmap a single streaming mode DMA translation. The dma_addr and size must
+ * match what was provided for in a previous swiotlb_map_single call. All
+ * other usages are undefined.
  *
- * After this call, reads by the cpu to the buffer are guaranteed to see whatever the
- * device wrote there.
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
  */
 void
-swiotlb_unmap_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
+		     int dir)
 {
 	char *dma_addr = phys_to_virt(dev_addr);
@@ -456,16 +474,18 @@ swiotlb_unmap_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, in
 }

 /*
- * Make physical memory consistent for a single streaming mode DMA translation after a
- * transfer.
+ * Make physical memory consistent for a single streaming mode DMA translation
+ * after a transfer.
  *
- * If you perform a swiotlb_map_single() but wish to interrogate the buffer using the cpu,
- * yet do not wish to teardown the PCI dma mapping, you must call this function before
- * doing so. At the next point you give the PCI dma address back to the card, you must
- * first perform a swiotlb_dma_sync_for_device, and then the device again owns the buffer
+ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
+ * using the cpu, yet do not wish to teardown the PCI dma mapping, you must
+ * call this function before doing so. At the next point you give the PCI dma
+ * address back to the card, you must first perform a
+ * swiotlb_dma_sync_for_device, and then the device again owns the buffer
  */
 void
-swiotlb_sync_single_for_cpu (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+			    size_t size, int dir)
 {
 	char *dma_addr = phys_to_virt(dev_addr);
@@ -478,7 +498,8 @@ swiotlb_sync_single_for_cpu (struct device *hwdev, dma_addr_t dev_addr, size_t s
 }

 void
-swiotlb_sync_single_for_device (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
+			       size_t size, int dir)
 {
 	char *dma_addr = phys_to_virt(dev_addr);
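The ownership handshake described in the comment above, in sketch form: a hypothetical long-lived DMA_FROM_DEVICE mapping that the CPU peeks at between device writes (names are invented):

```c
/* hypothetical poll of a persistent receive buffer */
static void example_rx_poll(struct device *my_dev, dma_addr_t handle,
			    void *buf, size_t len)
{
	/* take the buffer back from the card so the CPU sees fresh data */
	swiotlb_sync_single_for_cpu(my_dev, handle, len, DMA_FROM_DEVICE);

	/* ... CPU inspects buf here ... */

	/* hand the buffer back; the device owns it again after this */
	swiotlb_sync_single_for_device(my_dev, handle, len, DMA_FROM_DEVICE);
}
```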
@@ -491,10 +512,11 @@ swiotlb_sync_single_for_device (struct device *hwdev, dma_addr_t dev_addr, size_
 }

 /*
- * Map a set of buffers described by scatterlist in streaming mode for DMA. This is the
- * scatter-gather version of the above swiotlb_map_single interface. Here the scatter
- * gather list elements are each tagged with the appropriate dma address and length. They
- * are obtained via sg_dma_{address,length}(SG).
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the above swiotlb_map_single
+ * interface. Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}(SG).
  *
  * NOTE: An implementation may be able to use a smaller number of
  * DMA address/length pairs than there are SG table elements.
@@ -502,10 +524,12 @@ swiotlb_sync_single_for_device (struct device *hwdev, dma_addr_t dev_addr, size_
  * The routine returns the number of addr/length pairs actually
  * used, at most nents.
  *
- * Device ownership issues as mentioned above for swiotlb_map_single are the same here.
+ * Device ownership issues as mentioned above for swiotlb_map_single are the
+ * same here.
  */
 int
-swiotlb_map_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
+	       int dir)
 {
 	void *addr;
 	unsigned long dev_addr;
@@ -520,9 +544,9 @@ swiotlb_map_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int di
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
 			sg->dma_address = (dma_addr_t) virt_to_phys(map_single(hwdev, addr, sg->length, dir));
 			if (!sg->dma_address) {
-				/* Don't panic here, we expect pci_map_sg users
-				   to do proper error handling. */
+				/* Don't panic here, we expect map_sg users
+				   to do proper error handling. */
 				swiotlb_full(hwdev, sg->length, dir, 0);
 				swiotlb_unmap_sg(hwdev, sg - i, i, dir);
 				sg[0].dma_length = 0;
 				return 0;
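Callers of the scatter-gather path are expected to honor the returned pair count and read addresses back through sg_dma_{address,length}(), as the interface comment above says. A hypothetical caller (sketch; the descriptor-programming step is a placeholder):

```c
/* hypothetical scatter-gather mapping for one device request */
static int example_map_request(struct device *my_dev,
			       struct scatterlist *sg, int nents)
{
	int i, used;

	used = swiotlb_map_sg(my_dev, sg, nents, DMA_TO_DEVICE);
	if (!used)
		return -1;	/* mapping failed; map_sg already unwound */

	for (i = 0; i < used; i++) {
		/* ... program one hw descriptor from sg_dma_address(&sg[i])
		 *     and sg_dma_length(&sg[i]) ... */
	}
	return used;
}
```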
@@ -535,11 +559,12 @@ swiotlb_map_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int di
 }

 /*
- * Unmap a set of streaming mode DMA translations. Again, cpu read rules concerning calls
- * here are the same as for swiotlb_unmap_single() above.
+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
+ * concerning calls here are the same as for swiotlb_unmap_single() above.
  */
 void
-swiotlb_unmap_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
+		 int dir)
 {
 	int i;
@@ -554,14 +579,15 @@ swiotlb_unmap_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int
 }

 /*
- * Make physical memory consistent for a set of streaming mode DMA translations after a
- * transfer.
+ * Make physical memory consistent for a set of streaming mode DMA translations
+ * after a transfer.
  *
- * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules and
- * usage.
+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
+ * and usage.
  */
 void
-swiotlb_sync_sg_for_cpu (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+			int nelems, int dir)
 {
 	int i;
@@ -570,11 +596,13 @@ swiotlb_sync_sg_for_cpu (struct device *hwdev, struct scatterlist *sg, int nelem
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir);
+			sync_single(hwdev, (void *) sg->dma_address,
+				    sg->dma_length, dir);
 }

 void
-swiotlb_sync_sg_for_device (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+			   int nelems, int dir)
 {
 	int i;
@@ -583,19 +611,21 @@ swiotlb_sync_sg_for_device (struct device *hwdev, struct scatterlist *sg, int ne
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir);
+			sync_single(hwdev, (void *) sg->dma_address,
+				    sg->dma_length, dir);
 }

 int
-swiotlb_dma_mapping_error (dma_addr_t dma_addr)
+swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 {
 	return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
 }

 /*
- * Return whether the given PCI device DMA address mask can be supported properly. For
- * example, if your device can only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
+ * Return whether the given PCI device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
+ * this function.
  */
 int
 swiotlb_dma_supported (struct device *hwdev, u64 mask)
...
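A worked instance of the comment's 24-bit example: a hypothetical probe routine that refuses to bind when the bounce pool sits above what the device can address (the function and the direct dma_mask store are illustrative, not this file's API):

```c
/* hypothetical probe-time check for a device with 24 address lines */
static int example_probe(struct device *my_dev)
{
	u64 mask = 0x00ffffff;	/* device drives only the low 24 bits */

	if (!swiotlb_dma_supported(my_dev, mask))
		return -1;	/* IO TLB is out of this device's reach */
	*my_dev->dma_mask = mask;	/* record the working mask */
	return 0;
}
```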