Commit 52aee3e8 authored by Christoph Hellwig

ia64/sba_iommu: improve internal map_page users

Remove the odd sba_{un,}map_single_attrs wrappers, check errors
everywhere.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a20388be
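
For context, "check errors everywhere" means each internal caller of sba_map_page() now tests the returned IOVA with dma_mapping_error() instead of assuming the mapping succeeded. A minimal sketch of that calling pattern, written against the generic DMA API from <linux/dma-mapping.h> (dev, page, and size are placeholder variables for illustration, not identifiers from this patch):

	#include <linux/dma-mapping.h>

	/* Map one page for DMA; the IOMMU can run out of IOVA space, so the
	 * returned handle must be checked before it is handed to hardware. */
	dma_addr_t handle = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;	/* nothing was mapped; nothing to unmap */

	/* ... perform the DMA transfer ... */

	dma_unmap_page(dev, handle, size, DMA_BIDIRECTIONAL);

This is the shape of the checks added below after the sba_map_page() calls in sba_alloc_coherent() and sba_map_sg_attrs().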
arch/ia64/hp/common/sba_iommu.c

@@ -907,11 +907,12 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 }
 
 /**
- * sba_map_single_attrs - map one buffer and return IOVA for DMA
+ * sba_map_page - map one buffer and return IOVA for DMA
  * @dev: instance of PCI owned by the driver that's asking.
- * @addr: driver buffer to map.
- * @size: number of bytes to map in driver buffer.
- * @dir: R/W or both.
+ * @page: page to map
+ * @poff: offset into page
+ * @size: number of bytes to map
+ * @dir: dma direction
  * @attrs: optional dma attributes
  *
  * See Documentation/DMA-API-HOWTO.txt
@@ -944,7 +945,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 		** Device is bit capable of DMA'ing to the buffer...
 		** just return the PCI address of ptr
 		*/
-		DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
+		DBG_BYPASS("sba_map_page() bypass mask/addr: "
 			   "0x%lx/0x%lx\n",
 			   to_pci_dev(dev)->dma_mask, pci_addr);
 		return pci_addr;
@@ -966,7 +967,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
+	if (sba_check_pdir(ioc,"Check before sba_map_page()"))
 		panic("Sanity check failed");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -997,20 +998,12 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 	/* form complete address */
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
+	sba_check_pdir(ioc,"Check after sba_map_page()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 	return SBA_IOVA(ioc, iovp, offset);
 }
 
-static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	return sba_map_page(dev, virt_to_page(addr),
-			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
-}
-
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
 sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
@@ -1036,7 +1029,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
 #endif
 
 /**
- * sba_unmap_single_attrs - unmap one IOVA and free resources
+ * sba_unmap_page - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova: IOVA of driver buffer previously mapped.
  * @size: number of bytes mapped in driver buffer.
@@ -1063,7 +1056,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
 		/*
 		** Address does not fall w/in IOVA, must be bypassing
 		*/
-		DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
+		DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n",
 			   iova);
 
 #ifdef ENABLE_MARK_CLEAN
@@ -1114,12 +1107,6 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
 
-void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-			    enum dma_data_direction dir, unsigned long attrs)
-{
-	sba_unmap_page(dev, iova, size, dir, attrs);
-}
-
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
  * @dev: instance of PCI owned by the driver that's asking.
@@ -1132,30 +1119,24 @@ static void *
 sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t flags, unsigned long attrs)
 {
+	struct page *page;
 	struct ioc *ioc;
+	int node = -1;
 	void *addr;
 
 	ioc = GET_IOC(dev);
 	ASSERT(ioc);
-
 #ifdef CONFIG_NUMA
-	{
-		struct page *page;
-
-		page = alloc_pages_node(ioc->node, flags, get_order(size));
-		if (unlikely(!page))
-			return NULL;
-
-		addr = page_address(page);
-	}
-#else
-	addr = (void *) __get_free_pages(flags, get_order(size));
+	node = ioc->node;
 #endif
-	if (unlikely(!addr))
+
+	page = alloc_pages_node(node, flags, get_order(size));
+	if (unlikely(!page))
 		return NULL;
+
+	addr = page_address(page);
 	memset(addr, 0, size);
-	*dma_handle = virt_to_phys(addr);
+	*dma_handle = page_to_phys(page);
 
 #ifdef ALLOW_IOV_BYPASS
 	ASSERT(dev->coherent_dma_mask);
@@ -1174,9 +1155,10 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 * If device can't bypass or bypass is disabled, pass the 32bit fake
 	 * device to map single to get an iova mapping.
 	 */
-	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
-					   size, 0, 0);
+	*dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size,
+				   DMA_BIDIRECTIONAL, 0);
+	if (dma_mapping_error(dev, *dma_handle))
+		return NULL;
 
 	return addr;
 }
@@ -1193,7 +1175,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
 			      dma_addr_t dma_handle, unsigned long attrs)
 {
-	sba_unmap_single_attrs(dev, dma_handle, size, 0, 0);
+	sba_unmap_page(dev, dma_handle, size, 0, 0);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
@@ -1483,7 +1465,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
 		sglist->dma_length = sglist->length;
-		sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
+		sglist->dma_address = sba_map_page(dev, sg_page(sglist),
+				sglist->offset, sglist->length, dir, attrs);
+		if (dma_mapping_error(dev, sglist->dma_address))
+			return 0;
 		return 1;
 	}
@@ -1572,8 +1557,8 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 	while (nents && sglist->dma_length) {
-		sba_unmap_single_attrs(dev, sglist->dma_address,
-				       sglist->dma_length, dir, attrs);
+		sba_unmap_page(dev, sglist->dma_address, sglist->dma_length,
+			       dir, attrs);
 		sglist = sg_next(sglist);
 		nents--;
 	}