Commit e1df56ff authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64: Consolidate some of the iommu DMA mapping routines.

From: Stephen Rothwell <sfr@canb.auug.org.au>

This patch consolidates some of the iommu DMA mapping routines.
parent 9b678c1e
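
In outline: the per-bus mapping logic (PCI and VIO each computed page counts, looked up the table, and called iommu_alloc/iommu_free themselves) moves into generic helpers that take an explicit struct iommu_table *, so each bus wrapper reduces to a table lookup plus one call. A sketch of the pattern in miniature, using signatures taken from the diff below:

/* The consolidation pattern in this patch, in miniature: one generic
 * helper owns the mapping logic, and each bus supplies only its own
 * iommu_table lookup.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
		size_t size, enum dma_data_direction direction);

/* PCI wrapper: formerly ~25 lines of duplicated logic. */
static dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
		size_t size, enum dma_data_direction direction)
{
	return iommu_map_single(devnode_table(hwdev), vaddr, size, direction);
}

/* VIO wrapper: same helper, different table lookup. */
dma_addr_t vio_map_single(struct vio_dev *dev, void *vaddr,
		size_t size, enum dma_data_direction direction)
{
	return iommu_map_single(dev->iommu_table, vaddr, size, direction);
}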
@@ -140,7 +140,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl, unsigned long np
 	return n;
 }
 
-dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
+static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
 		unsigned int npages, enum dma_data_direction direction)
 {
 	unsigned long entry, flags;
@@ -206,7 +206,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 		__clear_bit(free_entry+i, tbl->it_map);
 }
 
-void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
+static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 		unsigned int npages)
 {
 	unsigned long flags;
@@ -225,7 +225,7 @@ void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	spin_unlock_irqrestore(&(tbl->it_lock), flags);
 }
 
-int iommu_alloc_sg(struct iommu_table *tbl, struct device *dev,
+int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		struct scatterlist *sglist, int nelems,
 		enum dma_data_direction direction)
 {
@@ -235,6 +235,11 @@ int iommu_alloc_sg(struct iommu_table *tbl, struct device *dev,
 	int outcount;
 	unsigned long handle;
 
+	BUG_ON(direction == DMA_NONE);
+
+	if ((nelems == 0) || !tbl)
+		return 0;
+
 	outs = s = segstart = &sglist[0];
 	outcount = 1;
 	handle = 0;
@@ -349,11 +354,16 @@ int iommu_alloc_sg(struct iommu_table *tbl, struct device *dev,
 	}
 }
 
-void iommu_free_sg(struct iommu_table *tbl, struct scatterlist *sglist,
-		int nelems)
+void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+		int nelems, enum dma_data_direction direction)
 {
 	unsigned long flags;
 
+	BUG_ON(direction == DMA_NONE);
+
+	if (!tbl)
+		return;
+
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
 	while (nelems--) {
@@ -414,3 +424,104 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl)
 
 	return tbl;
 }
+
+/* Creates TCEs for a user provided buffer.  The user buffer must be
+ * contiguous real kernel storage (not vmalloc).  The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer.  The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ */
+dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
+		size_t size, enum dma_data_direction direction)
+{
+	dma_addr_t dma_handle = DMA_ERROR_CODE;
+	unsigned long uaddr;
+	unsigned int npages;
+
+	BUG_ON(direction == DMA_NONE);
+
+	uaddr = (unsigned long)vaddr;
+	npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
+	npages >>= PAGE_SHIFT;
+
+	if (tbl) {
+		dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
+		if (dma_handle == DMA_ERROR_CODE) {
+			if (printk_ratelimit()) {
+				printk(KERN_INFO "iommu_alloc failed, "
+						"tbl %p vaddr %p npages %d\n",
+						tbl, vaddr, npages);
+			}
+		} else
+			dma_handle |= (uaddr & ~PAGE_MASK);
+	}
+
+	return dma_handle;
+}
+
+void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+
+	if (tbl)
+		iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
+				(dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
+}
+
+/* Allocates a contiguous real buffer and creates mappings over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (mapping) of the first page.
+ */
+void *iommu_alloc_consistent(struct iommu_table *tbl, size_t size,
+		dma_addr_t *dma_handle)
+{
+	void *ret = NULL;
+	dma_addr_t mapping;
+	unsigned int npages, order;
+
+	size = PAGE_ALIGN(size);
+	npages = size >> PAGE_SHIFT;
+	order = get_order(size);
+
+	/*
+	 * Client asked for way too much space.  This is checked later
+	 * anyway.  It is easier to debug here for the drivers than in
+	 * the tce tables.
+	 */
+	if (order >= IOMAP_MAX_ORDER) {
+		printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
+		return (void *)DMA_ERROR_CODE;
+	}
+
+	if (!tbl)
+		return NULL;
+
+	/* Alloc enough pages (and possibly more) */
+	ret = (void *)__get_free_pages(GFP_ATOMIC, order);
+	if (!ret)
+		return NULL;
+	memset(ret, 0, size);
+
+	/* Set up tces to cover the allocated range */
+	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
+	if (mapping == DMA_ERROR_CODE) {
+		free_pages((unsigned long)ret, order);
+		ret = NULL;
+	} else
+		*dma_handle = mapping;
+
+	return ret;
+}
+
+void iommu_free_consistent(struct iommu_table *tbl, size_t size,
+		void *vaddr, dma_addr_t dma_handle)
+{
+	unsigned int npages;
+
+	if (tbl) {
+		size = PAGE_ALIGN(size);
+		npages = size >> PAGE_SHIFT;
+		iommu_free(tbl, dma_handle, npages);
+		free_pages((unsigned long)vaddr, get_order(size));
+	}
+}
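
Aside: the page-count arithmetic used by iommu_map_single above, (PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK)) >> PAGE_SHIFT, counts every page the buffer touches, not just size / PAGE_SIZE. A small standalone model of it (userspace, 4K pages assumed for illustration):

#include <stdio.h>

/* Userspace model of the kernel's npages computation; PAGE_MASK and
 * PAGE_ALIGN are spelled out by hand, 4K pages assumed.
 */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* A 100-byte buffer starting 8 bytes before a page boundary
	 * straddles two pages, so it needs two TCE entries.
	 */
	unsigned long uaddr = 0x10000ff8UL;
	unsigned long size  = 100;

	unsigned long npages =
		(PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK)) >> PAGE_SHIFT;

	printf("npages = %lu\n", npages);	/* prints 2 */
	return 0;
}

The low bits of uaddr are then OR'ed back into the returned dma_addr_t, so the DMA address points at the same byte-within-page as vaddr.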
@@ -43,8 +43,6 @@
 #include <asm/iSeries/iSeries_pci.h>
 #endif /* CONFIG_PPC_ISERIES */
 
-#define DBG(...)
-
 static inline struct iommu_table *devnode_table(struct pci_dev *dev)
 {
 	if (!dev)
@@ -69,67 +67,15 @@ static inline struct iommu_table *devnode_table(struct pci_dev *dev)
 static void *pci_iommu_alloc_consistent(struct pci_dev *hwdev, size_t size,
 		dma_addr_t *dma_handle)
 {
-	struct iommu_table *tbl;
-	void *ret = NULL;
-	dma_addr_t mapping;
-	unsigned int npages, order;
-
-	size = PAGE_ALIGN(size);
-	npages = size >> PAGE_SHIFT;
-	order = get_order(size);
-
-	/* Client asked for way too much space. This is checked later anyway */
-	/* It is easier to debug here for the drivers than in the tce tables.*/
-	if (order >= IOMAP_MAX_ORDER) {
-		printk("PCI_DMA: pci_alloc_consistent size too large: 0x%lx\n",
-			size);
-		return NULL;
-	}
-
-	tbl = devnode_table(hwdev);
-
-	if (!tbl)
-		return NULL;
-
-	/* Alloc enough pages (and possibly more) */
-	ret = (void *)__get_free_pages(GFP_ATOMIC, order);
-	if (!ret)
-		return NULL;
-	memset(ret, 0, size);
-
-	/* Set up tces to cover the allocated range */
-	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
-	if (mapping == DMA_ERROR_CODE) {
-		free_pages((unsigned long)ret, order);
-		ret = NULL;
-	} else
-		*dma_handle = mapping;
-
-	return ret;
+	return iommu_alloc_consistent(devnode_table(hwdev), size, dma_handle);
 }
 
 static void pci_iommu_free_consistent(struct pci_dev *hwdev, size_t size,
 		void *vaddr, dma_addr_t dma_handle)
 {
-	struct iommu_table *tbl;
-	unsigned int npages;
-
-	size = PAGE_ALIGN(size);
-	npages = size >> PAGE_SHIFT;
-
-	tbl = devnode_table(hwdev);
-
-	if (tbl) {
-		iommu_free(tbl, dma_handle, npages);
-		free_pages((unsigned long)vaddr, get_order(size));
-	}
+	iommu_free_consistent(devnode_table(hwdev), size, vaddr, dma_handle);
 }
 
 /* Creates TCEs for a user provided buffer.  The user buffer must be
  * contiguous real kernel storage (not vmalloc).  The address of the buffer
  * passed here is the kernel (virtual) address of the buffer.  The buffer
@@ -139,81 +85,28 @@ static void pci_iommu_free_consistent(struct pci_dev *hwdev, size_t size,
 static dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
 		size_t size, enum dma_data_direction direction)
 {
-	struct iommu_table * tbl;
-	dma_addr_t dma_handle = DMA_ERROR_CODE;
-	unsigned long uaddr;
-	unsigned int npages;
-
-	BUG_ON(direction == DMA_NONE);
-
-	uaddr = (unsigned long)vaddr;
-	npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
-	npages >>= PAGE_SHIFT;
-
-	tbl = devnode_table(hwdev);
-
-	if (tbl) {
-		dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
-		if (dma_handle == DMA_ERROR_CODE) {
-			if (printk_ratelimit()) {
-				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %p npages %d\n",
-					tbl, vaddr, npages);
-			}
-		} else
-			dma_handle |= (uaddr & ~PAGE_MASK);
-	}
-
-	return dma_handle;
+	return iommu_map_single(devnode_table(hwdev), vaddr, size, direction);
 }
 
 static void pci_iommu_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction direction)
 {
-	struct iommu_table *tbl;
-	unsigned int npages;
-
-	BUG_ON(direction == DMA_NONE);
-
-	npages = (PAGE_ALIGN(dma_handle + size) - (dma_handle & PAGE_MASK))
-		>> PAGE_SHIFT;
-
-	tbl = devnode_table(hwdev);
-
-	if (tbl)
-		iommu_free(tbl, dma_handle, npages);
+	iommu_unmap_single(devnode_table(hwdev), dma_handle, size, direction);
 }
 
 static int pci_iommu_map_sg(struct pci_dev *pdev, struct scatterlist *sglist,
 		int nelems, enum dma_data_direction direction)
 {
-	struct iommu_table * tbl;
-
-	BUG_ON(direction == DMA_NONE);
-
-	if (nelems == 0)
-		return 0;
-
-	tbl = devnode_table(pdev);
-	if (!tbl)
-		return 0;
-
-	return iommu_alloc_sg(tbl, &pdev->dev, sglist, nelems, direction);
+	return iommu_map_sg(&pdev->dev, devnode_table(pdev), sglist,
+			nelems, direction);
 }
 
 static void pci_iommu_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist,
 		int nelems, enum dma_data_direction direction)
 {
-	struct iommu_table *tbl;
-
-	BUG_ON(direction == DMA_NONE);
-
-	tbl = devnode_table(pdev);
-	if (!tbl)
-		return;
-
-	iommu_free_sg(tbl, sglist, nelems);
+	iommu_unmap_sg(devnode_table(pdev), sglist, nelems, direction);
 }
 
 /* We support DMA to/from any memory page via the iommu */
@@ -413,145 +413,46 @@ int vio_disable_interrupts(struct vio_dev *dev)
 }
 EXPORT_SYMBOL(vio_disable_interrupts);
 
 dma_addr_t vio_map_single(struct vio_dev *dev, void *vaddr,
 		size_t size, enum dma_data_direction direction)
 {
-	struct iommu_table *tbl;
-	dma_addr_t dma_handle = DMA_ERROR_CODE;
-	unsigned long uaddr;
-	unsigned int npages;
-
-	BUG_ON(direction == DMA_NONE);
-
-	uaddr = (unsigned long)vaddr;
-	npages = PAGE_ALIGN( uaddr + size ) - ( uaddr & PAGE_MASK );
-	npages >>= PAGE_SHIFT;
-
-	tbl = dev->iommu_table;
-
-	if (tbl) {
-		dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
-		dma_handle |= (uaddr & ~PAGE_MASK);
-	}
-
-	return dma_handle;
+	return iommu_map_single(dev->iommu_table, vaddr, size, direction);
 }
 EXPORT_SYMBOL(vio_map_single);
 
 void vio_unmap_single(struct vio_dev *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction direction)
 {
-	struct iommu_table * tbl;
-	unsigned int npages;
-
-	BUG_ON(direction == DMA_NONE);
-
-	npages = PAGE_ALIGN( dma_handle + size ) - ( dma_handle & PAGE_MASK );
-	npages >>= PAGE_SHIFT;
-
-	tbl = dev->iommu_table;
-	if(tbl)
-		iommu_free(tbl, dma_handle, npages);
+	iommu_unmap_single(dev->iommu_table, dma_handle, size, direction);
 }
 EXPORT_SYMBOL(vio_unmap_single);
 
 int vio_map_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems,
 		enum dma_data_direction direction)
 {
-	struct iommu_table *tbl;
-
-	BUG_ON(direction == DMA_NONE);
-
-	if (nelems == 0)
-		return 0;
-
-	tbl = vdev->iommu_table;
-	if (!tbl)
-		return 0;
-
-	return iommu_alloc_sg(tbl, &vdev->dev, sglist, nelems, direction);
+	return iommu_map_sg(&vdev->dev, vdev->iommu_table, sglist,
+			nelems, direction);
 }
 EXPORT_SYMBOL(vio_map_sg);
 
 void vio_unmap_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems,
 		enum dma_data_direction direction)
 {
-	struct iommu_table *tbl;
-
-	BUG_ON(direction == DMA_NONE);
-
-	tbl = vdev->iommu_table;
-	if (tbl)
-		iommu_free_sg(tbl, sglist, nelems);
+	iommu_unmap_sg(vdev->iommu_table, sglist, nelems, direction);
 }
 EXPORT_SYMBOL(vio_unmap_sg);
 
 void *vio_alloc_consistent(struct vio_dev *dev, size_t size,
 		dma_addr_t *dma_handle)
 {
-	struct iommu_table * tbl;
-	void *ret = NULL;
-	unsigned int npages, order;
-	dma_addr_t tce;
-
-	size = PAGE_ALIGN(size);
-	npages = size >> PAGE_SHIFT;
-	order = get_order(size);
-
-	/* Client asked for way to much space.  This is checked later anyway */
-	/* It is easier to debug here for the drivers than in the tce tables.*/
-	if(order >= IOMAP_MAX_ORDER) {
-		printk("VIO_DMA: vio_alloc_consistent size too large: 0x%lx \n", size);
-		return NULL;
-	}
-
-	tbl = dev->iommu_table;
-
-	if (tbl) {
-		/* Alloc enough pages (and possibly more) */
-		ret = (void *)__get_free_pages(GFP_ATOMIC, order);
-		if (ret) {
-			/* Page allocation succeeded */
-			memset(ret, 0, npages << PAGE_SHIFT);
-			/* Set up tces to cover the allocated range */
-			tce = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
-			if (tce == DMA_ERROR_CODE) {
-				PPCDBG(PPCDBG_TCE, "vio_alloc_consistent: iommu_alloc failed\n" );
-				free_pages((unsigned long)ret, order);
-				ret = NULL;
-			} else {
-				*dma_handle = tce;
-			}
-		}
-		else PPCDBG(PPCDBG_TCE, "vio_alloc_consistent: __get_free_pages failed for size = %d\n", size);
-	}
-	else PPCDBG(PPCDBG_TCE, "vio_alloc_consistent: get_iommu_table failed for 0x%016lx\n", dev);
-
-	PPCDBG(PPCDBG_TCE, "\tvio_alloc_consistent: dma_handle = 0x%16.16lx\n", *dma_handle);
-	PPCDBG(PPCDBG_TCE, "\tvio_alloc_consistent: return = 0x%16.16lx\n", ret);
-	return ret;
+	return iommu_alloc_consistent(dev->iommu_table, size, dma_handle);
 }
 EXPORT_SYMBOL(vio_alloc_consistent);
 
 void vio_free_consistent(struct vio_dev *dev, size_t size,
 		void *vaddr, dma_addr_t dma_handle)
 {
-	struct iommu_table *tbl;
-	unsigned int npages;
-
-	PPCDBG(PPCDBG_TCE, "vio_free_consistent:\n");
-	PPCDBG(PPCDBG_TCE, "\tdev = 0x%16.16lx, size = 0x%16.16lx, dma_handle = 0x%16.16lx, vaddr = 0x%16.16lx\n", dev, size, dma_handle, vaddr);
-
-	size = PAGE_ALIGN(size);
-	npages = size >> PAGE_SHIFT;
-
-	tbl = dev->iommu_table;
-
-	if ( tbl ) {
-		iommu_free(tbl, dma_handle, npages);
-		free_pages((unsigned long)vaddr, get_order(size));
-	}
+	iommu_free_consistent(dev->iommu_table, size, vaddr, dma_handle);
 }
 EXPORT_SYMBOL(vio_free_consistent);
@@ -19,8 +19,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-#ifndef _PCI_DMA_H
-#define _PCI_DMA_H
+#ifndef _ASM_IOMMU_H
+#define _ASM_IOMMU_H
 
 #include <asm/types.h>
 #include <linux/spinlock.h>
@@ -131,20 +131,20 @@ extern void iommu_devnode_init(struct iSeries_Device_Node *dn);
  */
 extern struct iommu_table *iommu_init_table(struct iommu_table * tbl);
 
-/* allocates a range of tces and sets them to the pages */
-extern dma_addr_t iommu_alloc(struct iommu_table *, void *page,
-			unsigned int numPages,
-			enum dma_data_direction direction);
-extern void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
-			unsigned int npages);
-
-/* same with sg lists */
-extern int iommu_alloc_sg(struct iommu_table *table, struct device *dev,
+extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		struct scatterlist *sglist, int nelems,
 		enum dma_data_direction direction);
-extern void iommu_free_sg(struct iommu_table *tbl, struct scatterlist *sglist,
-		int nelems);
+extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+		int nelems, enum dma_data_direction direction);
+
+extern void *iommu_alloc_consistent(struct iommu_table *tbl, size_t size,
+		dma_addr_t *dma_handle);
+extern void iommu_free_consistent(struct iommu_table *tbl, size_t size,
+		void *vaddr, dma_addr_t dma_handle);
+extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
+		size_t size, enum dma_data_direction direction);
+extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction direction);
 
 extern void tce_init_pSeries(void);
 extern void tce_init_iSeries(void);
@@ -154,4 +154,4 @@ extern void pci_dma_init_direct(void);
 
 extern int ppc64_iommu_off;
 
-#endif
+#endif /* _ASM_IOMMU_H */
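
For illustration, a driver sitting on top of the consolidated consistent-allocation pair might look like the following sketch. struct my_dev, RING_BYTES, and the function names are hypothetical; only iommu_alloc_consistent/iommu_free_consistent and their error conventions come from this patch:

#define RING_BYTES 4096			/* illustrative size */

struct my_dev {				/* hypothetical device state */
	struct iommu_table *iommu_table;
	void *ring;
	dma_addr_t ring_dma;
};

static int my_driver_setup_ring(struct my_dev *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* One page-aligned, zeroed, DMA-mapped buffer.  Per the patch,
	 * the helper returns (void *)DMA_ERROR_CODE for oversized
	 * requests and NULL for other failures, so check both.
	 */
	ring = iommu_alloc_consistent(dev->iommu_table, RING_BYTES, &ring_dma);
	if (!ring || ring == (void *)DMA_ERROR_CODE)
		return -ENOMEM;

	dev->ring = ring;
	dev->ring_dma = ring_dma;	/* hand this address to the hardware */
	return 0;
}

static void my_driver_teardown_ring(struct my_dev *dev)
{
	/* Unmaps the TCEs and returns the pages in one call. */
	iommu_free_consistent(dev->iommu_table, RING_BYTES,
			dev->ring, dev->ring_dma);
}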