Commit 9b678c1e authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64: Use enum dma_data_direction for all APIs

From: Stephen Rothwell <sfr@canb.auug.org.au>

This is just a cleanup to use enum dma_data_direction for all APIs
except the pci_dma_ ones (since they are defined generically).

Also make most of the functions in arch/ppc64/kernel/pci_iommu.c
static.
parent f4421b9c
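
For reference, the generic enum dma_data_direction from <linux/dma-mapping.h> in this era of the kernel uses the same numeric values as the old PCI_DMA_* constants, which is what makes the (enum dma_data_direction) casts added to the include/asm-ppc64/pci.h wrappers below safe. A minimal sketch (enum values copied from the generic header; the direction check afterwards merely restates what the tce_build hooks in this patch do, it is not new code introduced here):

    /* include/linux/dma-mapping.h (generic kernel header) */
    enum dma_data_direction {
            DMA_BIDIRECTIONAL = 0,  /* same value as PCI_DMA_BIDIRECTIONAL */
            DMA_TO_DEVICE = 1,      /* same value as PCI_DMA_TODEVICE */
            DMA_FROM_DEVICE = 2,    /* same value as PCI_DMA_FROMDEVICE */
            DMA_NONE = 3,           /* same value as PCI_DMA_NONE */
    };

    /* Typical check in a tce_build hook: grant the device write access
     * unless the mapping is strictly CPU-to-device. */
    if (direction != DMA_TO_DEVICE)
            tce.te_pciwr = 1;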
@@ -33,6 +33,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -69,7 +70,7 @@ extern struct list_head iSeries_Global_Device_List;
 static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
-                unsigned long uaddr, int direction)
+                unsigned long uaddr, enum dma_data_direction direction)
 {
         u64 rc;
         union tce_entry tce;
@@ -82,12 +83,12 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
                 /* Virtual Bus */
                 tce.te_bits.tb_valid = 1;
                 tce.te_bits.tb_allio = 1;
-                if (direction != PCI_DMA_TODEVICE)
+                if (direction != DMA_TO_DEVICE)
                         tce.te_bits.tb_rdwr = 1;
         } else {
                 /* PCI Bus */
                 tce.te_bits.tb_rdwr = 1; /* Read allowed */
-                if (direction != PCI_DMA_TODEVICE)
+                if (direction != DMA_TO_DEVICE)
                         tce.te_bits.tb_pciwr = 1;
         }
......
@@ -31,7 +31,6 @@
 #include <linux/mm.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
-#include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <asm/io.h>
@@ -142,7 +141,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl, unsigned long np
 }
 dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
-                unsigned int npages, int direction)
+                unsigned int npages, enum dma_data_direction direction)
 {
         unsigned long entry, flags;
         dma_addr_t ret = DMA_ERROR_CODE;
@@ -227,7 +226,8 @@ void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 }
 int iommu_alloc_sg(struct iommu_table *tbl, struct device *dev,
-                struct scatterlist *sglist, int nelems, int direction)
+                struct scatterlist *sglist, int nelems,
+                enum dma_data_direction direction)
 {
         dma_addr_t dma_next, dma_addr;
         unsigned long flags;
......
@@ -33,6 +33,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -46,7 +47,7 @@
 static void tce_build_pSeries(struct iommu_table *tbl, long index,
                 long npages, unsigned long uaddr,
-                int direction)
+                enum dma_data_direction direction)
 {
         union tce_entry t;
         union tce_entry *tp;
@@ -54,7 +55,7 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
         t.te_word = 0;
         t.te_rdwr = 1; // Read allowed
-        if (direction != PCI_DMA_TODEVICE)
+        if (direction != DMA_TO_DEVICE)
                 t.te_pciwr = 1;
         tp = ((union tce_entry *)tbl->it_base) + index;
......
@@ -21,7 +21,7 @@
 #include <linux/config.h>
 #include <linux/kernel.h>
-#include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/page.h>
@@ -130,8 +130,9 @@ long plpar_put_term_char(unsigned long termno,
                          lbuf[1]);
 }
-static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages,
-                unsigned long uaddr, int direction )
+static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
+                long npages, unsigned long uaddr,
+                enum dma_data_direction direction)
 {
         u64 rc;
         union tce_entry tce;
@@ -139,7 +140,7 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, long npage
         tce.te_word = 0;
         tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
         tce.te_rdwr = 1;
-        if (direction != PCI_DMA_TODEVICE)
+        if (direction != DMA_TO_DEVICE)
                 tce.te_pciwr = 1;
         while (npages--) {
......
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/mm.h>
+#include <linux/dma-mapping.h>
 #include <asm/sections.h>
 #include <asm/io.h>
@@ -49,18 +50,18 @@ static void pci_direct_free_consistent(struct pci_dev *hwdev, size_t size,
 }
 static dma_addr_t pci_direct_map_single(struct pci_dev *hwdev, void *ptr,
-                size_t size, int direction)
+                size_t size, enum dma_data_direction direction)
 {
         return virt_to_abs(ptr);
 }
 static void pci_direct_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
-                size_t size, int direction)
+                size_t size, enum dma_data_direction direction)
 {
 }
 static int pci_direct_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-                int nents, int direction)
+                int nents, enum dma_data_direction direction)
 {
         int i;
@@ -73,7 +74,7 @@ static int pci_direct_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
 }
 static void pci_direct_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-                int nents, int direction)
+                int nents, enum dma_data_direction direction)
 {
 }
......
@@ -66,7 +66,7 @@ static inline struct iommu_table *devnode_table(struct pci_dev *dev)
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
  */
-void *pci_iommu_alloc_consistent(struct pci_dev *hwdev, size_t size,
+static void *pci_iommu_alloc_consistent(struct pci_dev *hwdev, size_t size,
                 dma_addr_t *dma_handle)
 {
         struct iommu_table *tbl;
@@ -100,7 +100,7 @@ void *pci_iommu_alloc_consistent(struct pci_dev *hwdev, size_t size,
         memset(ret, 0, size);
         /* Set up tces to cover the allocated range */
-        mapping = iommu_alloc(tbl, ret, npages, PCI_DMA_BIDIRECTIONAL);
+        mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
         if (mapping == DMA_ERROR_CODE) {
                 free_pages((unsigned long)ret, order);
@@ -112,7 +112,7 @@ void *pci_iommu_alloc_consistent(struct pci_dev *hwdev, size_t size,
 }
-void pci_iommu_free_consistent(struct pci_dev *hwdev, size_t size,
+static void pci_iommu_free_consistent(struct pci_dev *hwdev, size_t size,
                 void *vaddr, dma_addr_t dma_handle)
 {
         struct iommu_table *tbl;
@@ -136,15 +136,15 @@ void pci_iommu_free_consistent(struct pci_dev *hwdev, size_t size,
  * need not be page aligned, the dma_addr_t returned will point to the same
  * byte within the page as vaddr.
  */
-dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
-                size_t size, int direction)
+static dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
+                size_t size, enum dma_data_direction direction)
 {
         struct iommu_table * tbl;
         dma_addr_t dma_handle = DMA_ERROR_CODE;
         unsigned long uaddr;
         unsigned int npages;
-        BUG_ON(direction == PCI_DMA_NONE);
+        BUG_ON(direction == DMA_NONE);
         uaddr = (unsigned long)vaddr;
         npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
@@ -167,13 +167,13 @@ dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
 }
-void pci_iommu_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_handle,
-                size_t size, int direction)
+static void pci_iommu_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_handle,
+                size_t size, enum dma_data_direction direction)
 {
         struct iommu_table *tbl;
         unsigned int npages;
-        BUG_ON(direction == PCI_DMA_NONE);
+        BUG_ON(direction == DMA_NONE);
         npages = (PAGE_ALIGN(dma_handle + size) - (dma_handle & PAGE_MASK))
                 >> PAGE_SHIFT;
@@ -185,12 +185,12 @@ void pci_iommu_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_handle,
 }
-int pci_iommu_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
-                int direction)
+static int pci_iommu_map_sg(struct pci_dev *pdev, struct scatterlist *sglist,
+                int nelems, enum dma_data_direction direction)
 {
         struct iommu_table * tbl;
-        BUG_ON(direction == PCI_DMA_NONE);
+        BUG_ON(direction == DMA_NONE);
         if (nelems == 0)
                 return 0;
@@ -202,12 +202,12 @@ int pci_iommu_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelem
         return iommu_alloc_sg(tbl, &pdev->dev, sglist, nelems, direction);
 }
-void pci_iommu_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
-                int direction)
+static void pci_iommu_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist,
+                int nelems, enum dma_data_direction direction)
 {
         struct iommu_table *tbl;
-        BUG_ON(direction == PCI_DMA_NONE);
+        BUG_ON(direction == DMA_NONE);
         tbl = devnode_table(pdev);
         if (!tbl)
......
@@ -33,6 +33,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -141,7 +142,7 @@ static void dart_flush(struct iommu_table *tbl)
 static void dart_build_pmac(struct iommu_table *tbl, long index,
                 long npages, unsigned long uaddr,
-                int direction)
+                enum dma_data_direction direction)
 {
         unsigned int *dp;
         unsigned int rpn;
......
@@ -431,7 +431,7 @@ dma_addr_t vio_map_single(struct vio_dev *dev, void *vaddr,
         tbl = dev->iommu_table;
         if (tbl) {
-                dma_handle = iommu_alloc(tbl, vaddr, npages, (int)direction);
+                dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
                 dma_handle |= (uaddr & ~PAGE_MASK);
         }
@@ -470,7 +470,7 @@ int vio_map_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems,
         if (!tbl)
                 return 0;
-        return iommu_alloc_sg(tbl, &vdev->dev, sglist, nelems, (int)direction);
+        return iommu_alloc_sg(tbl, &vdev->dev, sglist, nelems, direction);
 }
 EXPORT_SYMBOL(vio_map_sg);
@@ -515,7 +515,7 @@ void *vio_alloc_consistent(struct vio_dev *dev, size_t size,
         /* Page allocation succeeded */
         memset(ret, 0, npages << PAGE_SHIFT);
         /* Set up tces to cover the allocated range */
-        tce = iommu_alloc(tbl, ret, npages, (int)DMA_BIDIRECTIONAL);
+        tce = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
         if (tce == DMA_ERROR_CODE) {
                 PPCDBG(PPCDBG_TCE, "vio_alloc_consistent: iommu_alloc failed\n" );
                 free_pages((unsigned long)ret, order);
......
@@ -25,6 +25,7 @@
 #include <asm/types.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 /*
  * IOMAP_MAX_ORDER defines the largest contiguous block
@@ -132,14 +133,15 @@ extern struct iommu_table *iommu_init_table(struct iommu_table * tbl);
 /* allocates a range of tces and sets them to the pages */
 extern dma_addr_t iommu_alloc(struct iommu_table *, void *page,
-                unsigned int numPages, int direction);
+                unsigned int numPages,
+                enum dma_data_direction direction);
 extern void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                 unsigned int npages);
 /* same with sg lists */
 extern int iommu_alloc_sg(struct iommu_table *table, struct device *dev,
                 struct scatterlist *sglist, int nelems,
-                int direction);
+                enum dma_data_direction direction);
 extern void iommu_free_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                 int nelems);
......
@@ -11,6 +11,7 @@
 #include <linux/config.h>
 #include <linux/seq_file.h>
+#include <linux/dma-mapping.h>
 struct pt_regs;
 struct pci_bus;
@@ -57,7 +58,7 @@ struct machdep_calls {
                         long index,
                         long npages,
                         unsigned long uaddr,
-                        int direction);
+                        enum dma_data_direction direction);
         void (*tce_free)(struct iommu_table *tbl,
                         long index,
                         long npages);
......
@@ -2,6 +2,8 @@
 #ifndef _ASM_PCI_BRIDGE_H
 #define _ASM_PCI_BRIDGE_H
+#include <linux/pci.h>
 /*
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
......
@@ -64,13 +64,13 @@ struct pci_dma_ops {
                         void *vaddr, dma_addr_t dma_handle);
         dma_addr_t (*pci_map_single)(struct pci_dev *hwdev, void *ptr,
-                        size_t size, int direction);
+                        size_t size, enum dma_data_direction direction);
         void (*pci_unmap_single)(struct pci_dev *hwdev, dma_addr_t dma_addr,
-                        size_t size, int direction);
+                        size_t size, enum dma_data_direction direction);
         int (*pci_map_sg)(struct pci_dev *hwdev, struct scatterlist *sg,
-                        int nents, int direction);
+                        int nents, enum dma_data_direction direction);
         void (*pci_unmap_sg)(struct pci_dev *hwdev, struct scatterlist *sg,
-                        int nents, int direction);
+                        int nents, enum dma_data_direction direction);
         int (*pci_dma_supported)(struct pci_dev *hwdev, u64 mask);
         int (*pci_dac_dma_supported)(struct pci_dev *hwdev, u64 mask);
 };
@@ -92,25 +92,29 @@ static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
 static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
                 size_t size, int direction)
 {
-        return pci_dma_ops.pci_map_single(hwdev, ptr, size, direction);
+        return pci_dma_ops.pci_map_single(hwdev, ptr, size,
+                        (enum dma_data_direction)direction);
 }
 static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
                 size_t size, int direction)
 {
-        pci_dma_ops.pci_unmap_single(hwdev, dma_addr, size, direction);
+        pci_dma_ops.pci_unmap_single(hwdev, dma_addr, size,
+                        (enum dma_data_direction)direction);
 }
 static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
                 int nents, int direction)
 {
-        return pci_dma_ops.pci_map_sg(hwdev, sg, nents, direction);
+        return pci_dma_ops.pci_map_sg(hwdev, sg, nents,
+                        (enum dma_data_direction)direction);
 }
 static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
                 int nents, int direction)
 {
-        pci_dma_ops.pci_unmap_sg(hwdev, sg, nents, direction);
+        pci_dma_ops.pci_unmap_sg(hwdev, sg, nents,
+                        (enum dma_data_direction)direction);
 }
 static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev,
......