[PATCH] ppc64: iommu rewrite

Lots of things renamed, sillicaps killed, stuff moved around, and common
code properly extracted from implementation-specific code, new
allocator, etc.  The code is overall a lot simpler, faster, less prone
to failure, and a lot more manageable.  I didn't use "bk mv"; there is no
need to keep the old history attached to the new file.
......@@ -109,6 +109,18 @@ config POWER4_ONLY
binary will not work on POWER3 or RS64 processors when compiled with
binutils 2.15 or later.
config IOMMU_VMERGE
bool "Enable IOMMU virtual merging (EXPERIMENTAL)"
depends on EXPERIMENTAL
default n
help
Cause IO segments sent to a device for DMA to be merged virtually
by the IOMMU when they happen to have been allocated contiguously.
This doesn't add pressure to the IOMMU allocator. However, some
drivers don't support getting large merged segments coming back
from *_map_sg(). Say Y if you know the drivers you are using are
properly handling this case.
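(Editor's illustration, not part of the patch: a minimal sketch of how a
driver consumes segments handed back by pci_map_sg() when IOMMU_VMERGE may
have merged adjacent entries.  The driver must honour the returned count and
the DMA accessors rather than its original nents and page addresses;
hw_program_segment() is a hypothetical device-specific helper.)

#include <linux/errno.h>
#include <linux/pci.h>

/* hw_program_segment() is made up for this sketch. */
extern void hw_program_segment(dma_addr_t addr, unsigned int len);

static int example_map_and_program(struct pci_dev *pdev,
				   struct scatterlist *sg, int nents)
{
	int i, mapped;

	mapped = pci_map_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
	if (mapped == 0)
		return -EIO;		/* mapping failed */

	/* Walk only the (possibly merged) segments the iommu returned. */
	for (i = 0; i < mapped; i++)
		hw_program_segment(sg_dma_address(&sg[i]),
				   sg_dma_len(&sg[i]));

	return mapped;
}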
config SMP
bool "Symmetric multi-processing support"
---help---
......
......@@ -10,11 +10,12 @@ obj-y := setup.o entry.o traps.o irq.o idle.o \
align.o semaphore.o bitops.o stab.o pacaData.o \
udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
ptrace32.o signal32.o pmc.o rtc.o init_task.o \
lmb.o cputable.o cpu_setup_power4.o idle_power4.o
lmb.o cputable.o cpu_setup_power4.o idle_power4.o \
iommu.o
obj-$(CONFIG_PPC_OF) += of_device.o
obj-$(CONFIG_PCI) += pci.o pci_dn.o pci_dma.o
obj-$(CONFIG_PCI) += pci.o pci_dn.o pci_iommu.o
ifdef CONFIG_PPC_ISERIES
obj-$(CONFIG_PCI) += iSeries_pci.o iSeries_pci_reset.o \
......@@ -28,12 +29,12 @@ obj-$(CONFIG_PPC_ISERIES) += iSeries_irq.o \
HvCall.o HvLpConfig.o LparData.o mf_proc.o \
iSeries_setup.o ItLpQueue.o hvCall.o \
mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \
proc_pmc.o
proc_pmc.o iSeries_iommu.o
obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \
eeh.o nvram.o pSeries_nvram.o rtasd.o ras.o \
open_pic.o xics.o pSeries_htab.o rtas.o \
chrp_setup.o i8259.o prom.o vio.o
chrp_setup.o i8259.o prom.o vio.o pSeries_iommu.o
obj-$(CONFIG_PROC_FS) += proc_ppc64.o
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
......
......@@ -51,7 +51,7 @@
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/pci_dma.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
......
/*
* arch/ppc64/kernel/iSeries_iommu.c
*
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
*
* Rewrite, cleanup:
*
* Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
*
* Dynamic DMA mapping support, iSeries-specific parts.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/ppcdebug.h>
#include <asm/iSeries/HvCallXm.h>
#include <asm/iSeries/LparData.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/iSeries/iSeries_pci.h>
#include <asm/machdep.h>
#include "pci.h"
static struct iommu_table veth_iommu_table; /* Tce table for virtual ethernet */
static struct iommu_table vio_iommu_table; /* Tce table for virtual I/O */
static struct iSeries_Device_Node veth_dev_node = { .LogicalSlot = 0xFF, .iommu_table = &veth_iommu_table };
static struct iSeries_Device_Node vio_dev_node = { .LogicalSlot = 0xFF, .iommu_table = &vio_iommu_table };
static struct pci_dev _veth_dev = { .sysdata = &veth_dev_node };
static struct pci_dev _vio_dev = { .sysdata = &vio_dev_node, .dev.bus = &pci_bus_type };
struct pci_dev *iSeries_veth_dev = &_veth_dev;
struct device *iSeries_vio_dev = &_vio_dev.dev;
extern struct list_head iSeries_Global_Device_List;
static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, int direction)
{
u64 rc;
union tce_entry tce;
while (npages--) {
tce.te_word = 0;
tce.te_bits.tb_rpn = (virt_to_absolute(uaddr)) >> PAGE_SHIFT;
if (tbl->it_type == TCE_VB) {
/* Virtual Bus */
tce.te_bits.tb_valid = 1;
tce.te_bits.tb_allio = 1;
if (direction != PCI_DMA_TODEVICE)
tce.te_bits.tb_rdwr = 1;
} else {
/* PCI Bus */
tce.te_bits.tb_rdwr = 1; /* Read allowed */
if (direction != PCI_DMA_TODEVICE)
tce.te_bits.tb_pciwr = 1;
}
rc = HvCallXm_setTce((u64)tbl->it_index,
(u64)index,
tce.te_word);
if (rc)
panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n", rc);
index++;
uaddr += PAGE_SIZE;
}
}
static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
{
u64 rc;
union tce_entry tce;
while (npages--) {
tce.te_word = 0;
rc = HvCallXm_setTce((u64)tbl->it_index,
(u64)index,
tce.te_word);
if (rc)
panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n", rc);
index++;
}
}
void __init iommu_vio_init(void)
{
struct iommu_table *t;
struct iommu_table_cb cb;
unsigned long cbp;
cb.itc_busno = 255; /* Bus 255 is the virtual bus */
cb.itc_virtbus = 0xff; /* Ask for virtual bus */
cbp = virt_to_absolute((unsigned long)&cb);
HvCallXm_getTceTableParms(cbp);
veth_iommu_table.it_size = cb.itc_size / 2;
veth_iommu_table.it_busno = cb.itc_busno;
veth_iommu_table.it_offset = cb.itc_offset;
veth_iommu_table.it_index = cb.itc_index;
veth_iommu_table.it_type = TCE_VB;
veth_iommu_table.it_entrysize = sizeof(union tce_entry);
veth_iommu_table.it_blocksize = 1;
t = iommu_init_table(&veth_iommu_table);
if (!t)
printk("Virtual Bus VETH TCE table failed.\n");
vio_iommu_table.it_size = cb.itc_size - veth_iommu_table.it_size;
vio_iommu_table.it_busno = cb.itc_busno;
vio_iommu_table.it_offset = cb.itc_offset +
veth_iommu_table.it_size * (PAGE_SIZE/sizeof(union tce_entry));
vio_iommu_table.it_index = cb.itc_index;
vio_iommu_table.it_type = TCE_VB;
vio_iommu_table.it_entrysize = sizeof(union tce_entry);
vio_iommu_table.it_blocksize = 1;
t = iommu_init_table(&vio_iommu_table);
if (!t)
printk("Virtual Bus VIO TCE table failed.\n");
}
/*
* This function compares the known tables to find an iommu_table
* that has already been built for hardware TCEs.
*/
static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
{
struct iSeries_Device_Node *dp;
for (dp = (struct iSeries_Device_Node *)iSeries_Global_Device_List.next;
dp != (struct iSeries_Device_Node *)&iSeries_Global_Device_List;
dp = (struct iSeries_Device_Node *)dp->Device_List.next)
if (dp->iommu_table != NULL &&
dp->iommu_table->it_type == TCE_PCI &&
dp->iommu_table->it_offset == tbl->it_offset &&
dp->iommu_table->it_index == tbl->it_index &&
dp->iommu_table->it_size == tbl->it_size)
return dp->iommu_table;
return NULL;
}
/*
* Call Hv with the architected data structure to get TCE table info.
* Put the returned data into the Linux representation of the
* TCE table data.
* The Hardware Tce table comes in three flavors.
* 1. TCE table shared between Buses.
* 2. TCE table per Bus.
* 3. TCE Table per IOA.
*/
static void iommu_table_getparms(struct iSeries_Device_Node* dn,
struct iommu_table* tbl)
{
struct iommu_table_cb *parms;
parms = (struct iommu_table_cb*)kmalloc(sizeof(*parms), GFP_KERNEL);
if (parms == NULL)
panic("PCI_DMA: TCE Table Allocation failed.");
memset(parms, 0, sizeof(*parms));
parms->itc_busno = ISERIES_BUS(dn);
parms->itc_slotno = dn->LogicalSlot;
parms->itc_virtbus = 0;
HvCallXm_getTceTableParms(REALADDR(parms));
if (parms->itc_size == 0)
panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
tbl->it_size = parms->itc_size;
tbl->it_busno = parms->itc_busno;
tbl->it_offset = parms->itc_offset;
tbl->it_index = parms->itc_index;
tbl->it_entrysize = sizeof(union tce_entry);
tbl->it_blocksize = 1;
tbl->it_type = TCE_PCI;
kfree(parms);
}
void iommu_devnode_init(struct iSeries_Device_Node *dn) {
struct iommu_table *tbl;
tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
iommu_table_getparms(dn, tbl);
/* Look for existing tce table */
dn->iommu_table = iommu_table_find(tbl);
if (dn->iommu_table == NULL)
dn->iommu_table = iommu_init_table(tbl);
else
kfree(tbl);
return;
}
void tce_init_iSeries(void)
{
ppc_md.tce_build = tce_build_iSeries;
ppc_md.tce_free = tce_free_iSeries;
pci_iommu_init();
}
......@@ -36,7 +36,7 @@
#include <asm/pci-bridge.h>
#include <asm/ppcdebug.h>
#include <asm/naca.h>
#include <asm/pci_dma.h>
#include <asm/iommu.h>
#include <asm/iSeries/HvCallPci.h>
#include <asm/iSeries/HvCallSm.h>
......@@ -53,7 +53,7 @@ extern int panic_timeout;
extern unsigned long iSeries_Base_Io_Memory;
extern struct TceTable *tceTables[256];
extern struct iommu_table *tceTables[256];
extern void iSeries_MmIoTest(void);
......@@ -273,7 +273,7 @@ void __init iSeries_pci_final_fixup(void)
iSeries_Device_Information(pdev, Buffer,
sizeof(Buffer));
printk("%d. %s\n", DeviceCount, Buffer);
create_pci_bus_tce_table((unsigned long)node);
iommu_devnode_init(node);
} else
printk("PCI: Device Tree not found for 0x%016lX\n",
(unsigned long)pdev);
......
This diff is collapsed.
/*
* arch/ppc64/kernel/pSeries_iommu.c
*
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
*
* Rewrite, cleanup:
*
* Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
*
* Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/ppcdebug.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include "pci.h"
/* Only used to pass OF initialization data set in prom.c into the main
* kernel code -- data ultimately copied into regular tce tables.
*/
extern struct _of_tce_table of_tce_table[];
extern struct pci_controller *hose_head;
extern struct pci_controller **hose_tail;
static void tce_build_pSeries(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
int direction)
{
union tce_entry t;
union tce_entry *tp;
t.te_word = 0;
t.te_rdwr = 1; // Read allowed
if (direction != PCI_DMA_TODEVICE)
t.te_pciwr = 1;
tp = ((union tce_entry *)tbl->it_base) + index;
while (npages--) {
/* can't move this out since we might cross LMB boundary */
t.te_rpn = (virt_to_absolute(uaddr)) >> PAGE_SHIFT;
tp->te_word = t.te_word;
uaddr += PAGE_SIZE;
tp++;
}
}
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
union tce_entry t;
union tce_entry *tp;
t.te_word = 0;
tp = ((union tce_entry *)tbl->it_base) + index;
while (npages--) {
tp->te_word = t.te_word;
tp++;
}
}
static void iommu_buses_init(void)
{
struct pci_controller* phb;
struct device_node *dn, *first_dn;
int num_slots, num_slots_ilog2;
int first_phb = 1;
/* XXX Should we be using pci_root_buses instead? -ojn
*/
for (phb=hose_head; phb; phb=phb->next) {
first_dn = ((struct device_node *)phb->arch_data)->child;
/* Carve 2GB into the largest dma_window_size possible */
for (dn = first_dn, num_slots = 0; dn != NULL; dn = dn->sibling)
num_slots++;
num_slots_ilog2 = __ilog2(num_slots);
if ((1<<num_slots_ilog2) != num_slots)
num_slots_ilog2++;
phb->dma_window_size = 1 << (22 - num_slots_ilog2);
/* Reserve 16MB of DMA space on the first PHB.
* We should probably be more careful and use firmware props.
* In reality this space is remapped, not lost. But we don't
* want to get that smart to handle it -- too much work.
*/
phb->dma_window_base_cur = first_phb ? (1 << 12) : 0;
first_phb = 0;
for (dn = first_dn; dn != NULL; dn = dn->sibling)
iommu_devnode_init(dn);
}
}
static void iommu_buses_init_lpar(struct list_head *bus_list)
{
struct list_head *ln;
struct pci_bus *bus;
struct device_node *busdn;
unsigned int *dma_window;
for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
bus = pci_bus_b(ln);
busdn = PCI_GET_DN(bus);
dma_window = (unsigned int *)get_property(busdn, "ibm,dma-window", 0);
if (dma_window) {
/* Bussubno hasn't been copied yet.
* Do it now because iommu_table_setparms_lpar needs it.
*/
busdn->bussubno = bus->number;
iommu_devnode_init(busdn);
}
/* look for a window on a bridge even if the PHB had one */
iommu_buses_init_lpar(&bus->children);
}
}
static void iommu_table_setparms(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl)
{
phandle node;
unsigned long i;
struct _of_tce_table *oft;
node = ((struct device_node *)(phb->arch_data))->node;
oft = NULL;
for (i=0; of_tce_table[i].node; i++)
if(of_tce_table[i].node == node) {
oft = &of_tce_table[i];
break;
}
if (!oft)
panic("PCI_DMA: iommu_table_setparms: Can't find phb named '%s' in of_tce_table\n", dn->full_name);
memset((void *)oft->base, 0, oft->size);
tbl->it_busno = phb->bus->number;
/* Units of tce entries */
tbl->it_offset = phb->dma_window_base_cur;
/* Adjust the current table offset to the next
* region. Measured in TCE entries. Force an
* alignment to the size allotted per IOA. This
* makes it easier to remove the 1st 16MB.
*/
phb->dma_window_base_cur += (phb->dma_window_size>>3);
phb->dma_window_base_cur &=
~((phb->dma_window_size>>3)-1);
/* Set the tce table size - measured in pages */
tbl->it_size = ((phb->dma_window_base_cur -
tbl->it_offset) << 3) >> PAGE_SHIFT;
/* Test if we are going over 2GB of DMA space */
if (phb->dma_window_base_cur > (1 << 19))
panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
tbl->it_base = oft->base;
tbl->it_index = 0;
tbl->it_entrysize = sizeof(union tce_entry);
tbl->it_blocksize = 16;
}
/*
* iommu_table_setparms_lpar
*
* Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
*
* ToDo: properly interpret the ibm,dma-window property. The definition is:
* logical-bus-number (1 word)
* phys-address (#address-cells words)
* size (#cell-size words)
*
* Currently we hard code these sizes (more or less).
*/
static void iommu_table_setparms_lpar(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl)
{
unsigned int *dma_window;
dma_window = (unsigned int *)get_property(dn, "ibm,dma-window", 0);
if (!dma_window)
panic("iommu_table_setparms_lpar: device %s has no"
" ibm,dma-window property!\n", dn->full_name);
tbl->it_busno = dn->bussubno;
tbl->it_size = (((((unsigned long)dma_window[4] << 32) |
(unsigned long)dma_window[5]) >> PAGE_SHIFT) << 3) >> PAGE_SHIFT;
tbl->it_offset = ((((unsigned long)dma_window[2] << 32) |
(unsigned long)dma_window[3]) >> 12);
tbl->it_base = 0;
tbl->it_index = dma_window[0];
tbl->it_entrysize = sizeof(union tce_entry);
tbl->it_blocksize = 16;
}
void iommu_devnode_init(struct device_node *dn)
{
struct iommu_table *tbl;
tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
GFP_KERNEL);
if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
iommu_table_setparms_lpar(dn->phb, dn, tbl);
else
iommu_table_setparms(dn->phb, dn, tbl);
dn->iommu_table = iommu_init_table(tbl);
}
void iommu_setup_pSeries(void)
{
struct pci_dev *dev = NULL;
struct device_node *dn, *mydn;
if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
iommu_buses_init_lpar(&pci_root_buses);
else
iommu_buses_init();
/* Now copy the iommu_table ptr from the bus devices down to every
* pci device_node. This means get_iommu_table() won't need to search
* up the device tree to find it.
*/
while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
mydn = dn = PCI_GET_DN(dev);
while (dn && dn->iommu_table == NULL)
dn = dn->parent;
if (dn)
mydn->iommu_table = dn->iommu_table;
}
}
/* These are called very early. */
void tce_init_pSeries(void)
{
ppc_md.tce_build = tce_build_pSeries;
ppc_md.tce_free = tce_free_pSeries;
pci_iommu_init();
}
......@@ -29,7 +29,7 @@
#include <asm/abs_addr.h>
#include <asm/mmu_context.h>
#include <asm/ppcdebug.h>
#include <asm/pci_dma.h>
#include <asm/iommu.h>
#include <linux/pci.h>
#include <asm/naca.h>
#include <asm/tlbflush.h>
......@@ -122,51 +122,59 @@ long plpar_put_term_char(unsigned long termno,
lbuf[1]);
}
static void tce_build_pSeriesLP(struct TceTable *tbl, long tcenum,
static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages,
unsigned long uaddr, int direction )
{
u64 set_tce_rc;
union Tce tce;
PPCDBG(PPCDBG_TCE, "build_tce: uaddr = 0x%lx\n", uaddr);
PPCDBG(PPCDBG_TCE, "\ttcenum = 0x%lx, tbl = 0x%lx, index=%lx\n",
tcenum, tbl, tbl->index);
tce.wholeTce = 0;
tce.tceBits.rpn = (virt_to_absolute(uaddr)) >> PAGE_SHIFT;
tce.tceBits.readWrite = 1;
if ( direction != PCI_DMA_TODEVICE ) tce.tceBits.pciWrite = 1;
set_tce_rc = plpar_tce_put((u64)tbl->index,
(u64)tcenum << 12,
tce.wholeTce );
if(set_tce_rc) {
printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%ld\n", set_tce_rc);
printk("\tindex = 0x%lx\n", (u64)tbl->index);
printk("\ttcenum = 0x%lx\n", (u64)tcenum);
printk("\ttce val = 0x%lx\n", tce.wholeTce );
u64 rc;
union tce_entry tce;
tce.te_word = 0;
tce.te_rpn = (virt_to_absolute(uaddr)) >> PAGE_SHIFT;
tce.te_rdwr = 1;
if (direction != PCI_DMA_TODEVICE)
tce.te_pciwr = 1;
while (npages--) {
rc = plpar_tce_put((u64)tbl->it_index,
(u64)tcenum << 12,
tce.te_word );
if(rc && printk_ratelimit()) {
printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
printk("\ttcenum = 0x%lx\n", (u64)tcenum);
printk("\ttce val = 0x%lx\n", tce.te_word );
show_stack(current, (unsigned long *)__get_SP());
}
tcenum++;
tce.te_rpn++;
}
}
static void tce_free_one_pSeriesLP(struct TceTable *tbl, long tcenum)
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
u64 set_tce_rc;
union Tce tce;
tce.wholeTce = 0;
set_tce_rc = plpar_tce_put((u64)tbl->index,
(u64)tcenum << 12,
tce.wholeTce );
if ( set_tce_rc ) {
printk("tce_free_one_pSeriesLP: plpar_tce_put failed\n");
printk("\trc = %ld\n", set_tce_rc);
printk("\tindex = 0x%lx\n", (u64)tbl->index);
printk("\ttcenum = 0x%lx\n", (u64)tcenum);
printk("\ttce val = 0x%lx\n", tce.wholeTce );
u64 rc;
union tce_entry tce;
tce.te_word = 0;
while (npages--) {
rc = plpar_tce_put((u64)tbl->it_index,
(u64)tcenum << 12,
tce.te_word );
if (rc && printk_ratelimit()) {
printk("tce_free_pSeriesLP: plpar_tce_put failed\n");
printk("\trc = %ld\n", rc);
printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
printk("\ttcenum = 0x%lx\n", (u64)tcenum);
printk("\ttce val = 0x%lx\n", tce.te_word );
show_stack(current, (unsigned long *)__get_SP());
}
tcenum++;
}
}
int vtermno; /* virtual terminal# for udbg */
......@@ -298,8 +306,10 @@ void pSeriesLP_init_early(void)
tce_init_pSeries();
ppc_md.tce_build = tce_build_pSeriesLP;
ppc_md.tce_free_one = tce_free_one_pSeriesLP;
ppc_md.tce_build = tce_build_pSeriesLP;
ppc_md.tce_free = tce_free_pSeriesLP;
pci_iommu_init();
#ifdef CONFIG_SMP
smp_init_pSeries();
......
......@@ -39,7 +39,7 @@
#include <asm/pci-bridge.h>
#include <asm/ppcdebug.h>
#include <asm/naca.h>
#include <asm/pci_dma.h>
#include <asm/iommu.h>
#include "open_pic.h"
#include "pci.h"
......@@ -699,7 +699,7 @@ void __init pSeries_final_fixup(void)
phbs_fixup_io();
chrp_request_regions();
pci_fix_bus_sysdata();
create_tce_tables();
iommu_setup_pSeries();
}
/***********************************************************************
......
......@@ -33,7 +33,7 @@
#include <asm/uaccess.h>
#include <asm/ppcdebug.h>
#include <asm/naca.h>
#include <asm/pci_dma.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include "pci.h"
......
This diff is collapsed.
......@@ -36,7 +36,7 @@
#include <asm/pci-bridge.h>
#include <asm/ppcdebug.h>
#include <asm/naca.h>
#include <asm/pci_dma.h>
#include <asm/iommu.h>
#include "pci.h"
......
/*
* arch/ppc64/kernel/pci_iommu.c
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
*
* Rewrite, cleanup, new allocation schemes:
* Copyright (C) 2004 Olof Johansson, IBM Corporation
*
* Dynamic DMA mapping support, platform-independent parts.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include "pci.h"
#ifdef CONFIG_PPC_ISERIES
#include <asm/iSeries/iSeries_pci.h>
#endif /* CONFIG_PPC_ISERIES */
#define DBG(...)
static inline struct iommu_table *devnode_table(struct pci_dev *dev)
{
if (!dev)
dev = ppc64_isabridge_dev;
if (!dev)
return NULL;
#ifdef CONFIG_PPC_ISERIES
return ISERIES_DEVNODE(dev)->iommu_table;
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_PPC_PSERIES
return PCI_GET_DN(dev)->iommu_table;
#endif /* CONFIG_PPC_PSERIES */
}
/* Allocates a contiguous real buffer and creates mappings over it.
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
*/
void *pci_iommu_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
struct iommu_table *tbl;
void *ret = NULL;
dma_addr_t mapping;
unsigned int npages, order;
size = PAGE_ALIGN(size);
npages = size >> PAGE_SHIFT;
order = get_order(size);
/* Client asked for way too much space. This is checked later anyway */
/* It is easier to debug here for the drivers than in the tce tables.*/
if (order >= IOMAP_MAX_ORDER) {
printk("PCI_DMA: pci_alloc_consistent size too large: 0x%lx\n",
size);
return (void *)NO_TCE;
}
tbl = devnode_table(hwdev);
if (!tbl)
return NULL;
/* Alloc enough pages (and possibly more) */
ret = (void *)__get_free_pages(GFP_ATOMIC, order);
if (!ret)
return NULL;
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
mapping = iommu_alloc(tbl, ret, npages, PCI_DMA_BIDIRECTIONAL, NULL);
/* Make sure the update is visible to hardware. */
mb();
if (mapping == NO_TCE) {
free_pages((unsigned long)ret, order);
ret = NULL;
} else
*dma_handle = mapping;
return ret;
}
void pci_iommu_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
struct iommu_table *tbl;
unsigned int npages;
size = PAGE_ALIGN(size);
npages = size >> PAGE_SHIFT;
tbl = devnode_table(hwdev);
if (tbl) {
iommu_free(tbl, dma_handle, npages);
free_pages((unsigned long)vaddr, get_order(size));
}
}
/* Creates TCEs for a user provided buffer. The user buffer must be
* contiguous real kernel storage (not vmalloc). The address of the buffer
* passed here is the kernel (virtual) address of the buffer. The buffer
* need not be page aligned, the dma_addr_t returned will point to the same
* byte within the page as vaddr.
*/
dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
size_t size, int direction)
{
struct iommu_table * tbl;
dma_addr_t dma_handle = NO_TCE;
unsigned long uaddr;
unsigned int npages;
unsigned long handle = 0;
BUG_ON(direction == PCI_DMA_NONE);
uaddr = (unsigned long)vaddr;
npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
npages >>= PAGE_SHIFT;
tbl = devnode_table(hwdev);
if (tbl) {
dma_handle = iommu_alloc(tbl, vaddr, npages, direction, &handle);
if (dma_handle == NO_TCE) {
if (printk_ratelimit()) {
printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %p npages %d\n",
tbl, vaddr, npages);
}
} else
dma_handle |= (uaddr & ~PAGE_MASK);
}
mb();
return dma_handle;
}
void pci_iommu_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
struct iommu_table *tbl;
unsigned int npages;
BUG_ON(direction == PCI_DMA_NONE);
npages = (PAGE_ALIGN(dma_handle + size) - (dma_handle & PAGE_MASK))
>> PAGE_SHIFT;
tbl = devnode_table(hwdev);
if (tbl)
iommu_free(tbl, dma_handle, npages);
}
int pci_iommu_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
int direction)
{
struct iommu_table * tbl;
unsigned long handle;
BUG_ON(direction == PCI_DMA_NONE);
if (nelems == 0)
return 0;
tbl = devnode_table(pdev);
if (!tbl)
return 0;
handle = 0;
return iommu_alloc_sg(tbl, sglist, nelems, direction, &handle);
}
void pci_iommu_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
int direction)
{
struct iommu_table *tbl;
BUG_ON(direction == PCI_DMA_NONE);
tbl = devnode_table(pdev);
if (!tbl)
return;
iommu_free_sg(tbl, sglist, nelems, direction);
}
/* We support DMA to/from any memory page via the iommu */
static int pci_iommu_dma_supported(struct pci_dev *pdev, u64 mask)
{
return 1;
}
void pci_iommu_init(void)
{
pci_dma_ops.pci_alloc_consistent = pci_iommu_alloc_consistent;
pci_dma_ops.pci_free_consistent = pci_iommu_free_consistent;
pci_dma_ops.pci_map_single = pci_iommu_map_single;
pci_dma_ops.pci_unmap_single = pci_iommu_unmap_single;
pci_dma_ops.pci_map_sg = pci_iommu_map_sg;
pci_dma_ops.pci_unmap_sg = pci_iommu_unmap_sg;
pci_dma_ops.pci_dma_supported = pci_iommu_dma_supported;
}
......@@ -60,7 +60,7 @@
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/pci_dma.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/dma.h>
#include <asm/bootx.h>
......@@ -181,8 +181,9 @@ void __init pmac_setup_arch(void)
#ifdef CONFIG_SMP
pmac_setup_smp();
#endif
/* Setup the PCI DMA to "direct" for now, until we have proper
* DART support and can deal with more than 2Gb of RAM
/* Setup the PCI DMA to "direct" by default. May be overriden
* by iommu later on
*/
pci_dma_init_direct();
......
......@@ -47,7 +47,7 @@
#include <asm/bitops.h>
#include <asm/naca.h>
#include <asm/pci.h>
#include <asm/pci_dma.h>
#include <asm/iommu.h>
#include <asm/bootinfo.h>
#include <asm/ppcdebug.h>
#include <asm/btext.h>
......@@ -3003,15 +3003,15 @@ static int of_finish_dynamic_node(struct device_node *node)
node->devfn = (regs[0] >> 8) & 0xff;
}
/* fixing up tce_table */
/* fixing up iommu_table */
if(strcmp(node->name, "pci") == 0 &&
get_property(node, "ibm,dma-window", NULL)) {
node->bussubno = node->busno;
create_pci_bus_tce_table((unsigned long)node);
iommu_devnode_init(node);
}
else
node->tce_table = parent->tce_table;
node->iommu_table = parent->iommu_table;
out:
of_node_put(parent);
......
This diff is collapsed.
......@@ -60,10 +60,8 @@
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iSeries/iSeries_dma.h>
#endif
struct mmu_context_queue_t mmu_context_queue;
int mem_init_done;
......@@ -885,7 +883,7 @@ void __init mem_init(void)
mem_init_done = 1;
#ifdef CONFIG_PPC_ISERIES
create_virtual_bus_tce_table();
iommu_vio_init();
#endif
}
......
/*
* iSeries_dma.h
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ISERIES_DMA_H
#define _ISERIES_DMA_H
#include <asm/types.h>
#include <linux/spinlock.h>
// NUM_TCE_LEVELS defines the largest contiguous block
// of dma (tce) space we can get. NUM_TCE_LEVELS = 10
// allows up to 2**9 pages (512 * 4096) = 2 MB
#define NUM_TCE_LEVELS 10
#define NO_TCE ((dma_addr_t)-1)
// Tces come in two formats, one for the virtual bus and a different
// format for PCI
#define TCE_VB 0
#define TCE_PCI 1
union Tce {
u64 wholeTce;
struct {
u64 cacheBits :6; /* Cache hash bits - not used */
u64 rsvd :6;
u64 rpn :40; /* Absolute page number */
u64 valid :1; /* Tce is valid (vb only) */
u64 allIo :1; /* Tce is valid for all lps (vb only) */
u64 lpIndex :8; /* LpIndex for user of TCE (vb only) */
u64 pciWrite :1; /* Write allowed (pci only) */
u64 readWrite :1; /* Read allowed (pci), Write allowed
(vb) */
} tceBits;
};
struct Bitmap {
unsigned long numBits;
unsigned long numBytes;
unsigned char * map;
};
struct MultiLevelBitmap {
unsigned long maxLevel;
struct Bitmap level[NUM_TCE_LEVELS];
};
struct TceTable {
u64 busNumber;
u64 size;
u64 startOffset;
u64 index;
spinlock_t lock;
struct MultiLevelBitmap mlbm;
};
struct HvTceTableManagerCB {
u64 busNumber; /* Bus number for this tce table */
u64 start; /* Will be NULL for secondary */
u64 totalSize; /* Size (in pages) of whole table */
u64 startOffset; /* Index into real tce table of the
start of our section */
u64 size; /* Size (in pages) of our section */
u64 index; /* Index of this tce table (token?) */
u16 maxTceTableIndex; /* Max number of tables for partition */
u8 virtualBusFlag; /* Flag to indicate virtual bus */
u8 rsvd[5];
};
extern struct TceTable virtBusTceTable; /* Tce table for virtual bus */
extern struct TceTable * build_tce_table( struct HvTceTableManagerCB *,
struct TceTable *);
extern void create_virtual_bus_tce_table( void );
extern void create_pci_bus_tce_table( unsigned busNumber );
#endif /* _ISERIES_DMA_H */
......@@ -92,7 +92,7 @@ struct iSeries_Device_Node {
int Flags; /* Possible flags(disable/bist)*/
u16 Vendor; /* Vendor ID */
u8 LogicalSlot; /* Hv Slot Index for Tces */
struct TceTable* DevTceTable; /* Device TCE Table */
struct iommu_table* iommu_table;/* Device TCE Table */
u8 PhbId; /* Phb Card is on. */
u16 Board; /* Board Number */
u8 FrameId; /* iSeries spcn Frame Id */
......
......@@ -181,9 +181,12 @@ static inline void * phys_to_virt(unsigned long address)
*/
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#if 0
#define BIO_VMERGE_BOUNDARY 4096
#endif
/* We do NOT want virtual merging, it would put too much pressure on
* our iommu allocator. Instead, we want drivers to be smart enough
* to coalesce sglists that happen to have been mapped in a contiguous
* way by the iommu
*/
#define BIO_VMERGE_BOUNDARY 0
#endif /* __KERNEL__ */
......
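(Editor's illustration, not part of the patch: with BIO_VMERGE_BOUNDARY
forced to 0 the block layer no longer does virtual merging, so a driver that
cares about hardware segment count can itself detect when the iommu mapped
adjacent sg entries back to back.  A minimal, hypothetical sketch:)

#include <linux/pci.h>

/* Count distinct DMA runs after pci_map_sg(); purely illustrative. */
static int example_count_hw_segments(struct scatterlist *sg, int mapped)
{
	int i, segs;

	if (mapped <= 0)
		return 0;

	segs = 1;
	for (i = 1; i < mapped; i++) {
		dma_addr_t prev_end = sg_dma_address(&sg[i - 1]) +
				      sg_dma_len(&sg[i - 1]);

		if (sg_dma_address(&sg[i]) != prev_end)
			segs++;
	}
	return segs;
}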
/*
* iommu.h
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
* Rewrite, cleanup:
* Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _PCI_DMA_H
#define _PCI_DMA_H
#include <asm/types.h>
#include <linux/spinlock.h>
/*
* IOMAP_MAX_ORDER defines the largest contiguous block
* of dma (tce) space we can get. IOMAP_MAX_ORDER = 10
* allows up to 2**9 pages (512 * 4096) = 2 MB
*/
#define IOMAP_MAX_ORDER 10
#define NO_TCE ((dma_addr_t)-1)
/*
* Tces come in two formats, one for the virtual bus and a different
* format for PCI
*/
#define TCE_VB 0
#define TCE_PCI 1
/* tce_entry
* Used by pSeries (SMP) and iSeries/pSeries LPAR, but there it's
* abstracted so layout is irrelevant.
*/
union tce_entry {
unsigned long te_word;
struct {
unsigned int tb_cacheBits :6; /* Cache hash bits - not used */
unsigned int tb_rsvd :6;
unsigned long tb_rpn :40; /* Real page number */
unsigned int tb_valid :1; /* Tce is valid (vb only) */
unsigned int tb_allio :1; /* Tce is valid for all lps (vb only) */
unsigned int tb_lpindex :8; /* LpIndex for user of TCE (vb only) */
unsigned int tb_pciwr :1; /* Write allowed (pci only) */
unsigned int tb_rdwr :1; /* Read allowed (pci), Write allowed (vb) */
} te_bits;
#define te_cacheBits te_bits.tb_cacheBits
#define te_rpn te_bits.tb_rpn
#define te_valid te_bits.tb_valid
#define te_allio te_bits.tb_allio
#define te_lpindex te_bits.tb_lpindex
#define te_pciwr te_bits.tb_pciwr
#define te_rdwr te_bits.tb_rdwr
};
struct iommu_table {
unsigned long it_busno; /* Bus number this table belongs to */
unsigned long it_size; /* Size in pages of iommu table */
unsigned long it_offset; /* Offset into global table */
unsigned long it_base; /* mapped address of tce table */
unsigned long it_index; /* which iommu table this is */
unsigned long it_type; /* type: PCI or Virtual Bus */
unsigned long it_entrysize; /* Size of an entry in bytes */
unsigned long it_blocksize; /* Entries in each block (cacheline) */
unsigned long it_hint; /* Hint for next alloc */
unsigned long it_largehint; /* Hint for large allocs */
spinlock_t it_lock; /* Protects it_map */
unsigned long it_mapsize; /* Size of map in # of entries (bits) */
unsigned long *it_map; /* A simple allocation bitmap for now */
};
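(Editor's illustration, not part of the patch: the real allocator lives in
the new arch/ppc64/kernel/iommu.c, whose diff is collapsed above.  Purely as
a hypothetical reading of these fields, a first-fit bitmap range allocator
over it_map/it_hint could look roughly like this:)

#include <linux/spinlock.h>
#include <asm/bitops.h>

/* Hypothetical sketch only -- not the allocator this patch adds. */
static long sketch_iommu_range_alloc(struct iommu_table *tbl,
				     unsigned long npages)
{
	unsigned long flags, start, i;

	spin_lock_irqsave(&tbl->it_lock, flags);
	start = tbl->it_hint;
again:
	start = find_next_zero_bit(tbl->it_map, tbl->it_mapsize, start);
	if (start + npages > tbl->it_mapsize) {
		spin_unlock_irqrestore(&tbl->it_lock, flags);
		return -1;			/* table exhausted */
	}
	/* Make sure the whole range is free, else skip past the conflict. */
	for (i = start; i < start + npages; i++) {
		if (test_bit(i, tbl->it_map)) {
			start = i + 1;
			goto again;
		}
	}
	for (i = start; i < start + npages; i++)
		__set_bit(i, tbl->it_map);

	tbl->it_hint = start + npages;
	spin_unlock_irqrestore(&tbl->it_lock, flags);
	return start;				/* entry index, not a dma address */
}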
#ifdef CONFIG_PPC_ISERIES
struct iommu_table_cb {
unsigned long itc_busno; /* Bus number for this tce table */
unsigned long itc_start; /* Will be NULL for secondary */
unsigned long itc_totalsize; /* Size (in pages) of whole table */
unsigned long itc_offset; /* Index into real tce table of the
start of our section */
unsigned long itc_size; /* Size (in pages) of our section */
unsigned long itc_index; /* Index of this tce table */
unsigned short itc_maxtables; /* Max num of tables for partition */
unsigned char itc_virtbus; /* Flag to indicate virtual bus */
unsigned char itc_slotno; /* IOA Tce Slot Index */
unsigned char itc_rsvd[4];
};
extern struct iommu_table vio_tce_table; /* Tce table for virtual bus */
#endif /* CONFIG_PPC_ISERIES */
struct scatterlist;
#ifdef CONFIG_PPC_PSERIES
/* Walks all buses and creates iommu tables */
extern void iommu_setup_pSeries(void);
extern void iommu_setup_pmac(void);
/* Creates table for an individual device node */
extern void iommu_devnode_init(struct device_node *dn);
#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_PPC_ISERIES
/* Walks all buses and creates iommu tables */
extern void iommu_setup_iSeries(void);
/* Initializes tables for vio buses */
extern void __init iommu_vio_init(void);
struct iSeries_Device_Node;
/* Creates table for an individual device node */
extern void iommu_devnode_init(struct iSeries_Device_Node *dn);
#endif /* CONFIG_PPC_ISERIES */
/* Initializes an iommu_table based on values set in the passed-in
* structure
*/
extern struct iommu_table *iommu_init_table(struct iommu_table * tbl);
/* allocates a range of tces and sets them to the pages */
extern dma_addr_t iommu_alloc(struct iommu_table *, void *page,
unsigned int numPages, int direction,
unsigned long *handle);
extern void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned int npages);
/* same with sg lists */
extern int iommu_alloc_sg(struct iommu_table *table, struct scatterlist *sglist,
int nelems, int direction, unsigned long *handle);
extern void iommu_free_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, int direction);
extern void tce_init_pSeries(void);
extern void tce_init_iSeries(void);
extern void pci_iommu_init(void);
extern void pci_dma_init_direct(void);
#endif
......@@ -15,7 +15,7 @@
struct pt_regs;
struct pci_bus;
struct device_node;
struct TceTable;
struct iommu_table;
struct rtc_time;
#ifdef CONFIG_SMP
......@@ -53,12 +53,15 @@ struct machdep_calls {
unsigned long number,
int local);
void (*tce_build)(struct TceTable * tbl,
long tcenum,
void (*tce_build)(struct iommu_table * tbl,
long index,
long npages,
unsigned long uaddr,
int direction);
void (*tce_free_one)(struct TceTable *tbl,
long tcenum);
void (*tce_free)(struct iommu_table *tbl,
long index,
long npages);
void (*tce_flush)(struct iommu_table *tbl);
void (*setup_arch)(void);
/* Optional, may be NULL. */
......
......@@ -70,6 +70,8 @@ struct pci_dma_ops {
int nents, int direction);
void (*pci_unmap_sg)(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction);
int (*pci_dma_supported)(struct pci_dev *hwdev, u64 mask);
int (*pci_dac_dma_supported)(struct pci_dev *hwdev, u64 mask);
};
extern struct pci_dma_ops pci_dma_ops;
......@@ -130,10 +132,25 @@ static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
* be supported properly. For example, if your device can
* only drive the low 24-bits during PCI bus mastering, then
* you would pass 0x00ffffff as the mask to this function.
* We default to supporting only 32-bit DMA unless we have
* an explicit override of this function in pci_dma_ops for
* the platform
*/
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
return 1;
if (pci_dma_ops.pci_dma_supported)
return pci_dma_ops.pci_dma_supported(hwdev, mask);
return (mask < 0x100000000ull);
}
/* DAC DMA is not supported by default, but we let the platform
* override this
*/
static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask)
{
if (pci_dma_ops.pci_dac_dma_supported)
return pci_dma_ops.pci_dac_dma_supported(hwdev, mask);
return 0;
}
extern int pci_domain_nr(struct pci_bus *bus);
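(Editor's illustration, not part of the patch: how a driver probe path
typically interacts with the new default, assuming the generic
pci_set_dma_mask(), which consults pci_dma_supported().  With the fallback
check of mask < 0x100000000ull, a plain 32-bit mask still succeeds even
without a platform override.)

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/pci.h>

static int example_probe_dma_setup(struct pci_dev *pdev)
{
	/* 64-bit DMA now fails unless the platform overrides the hook. */
	if (pci_set_dma_mask(pdev, 0xffffffffffffffffULL) == 0)
		return 0;

	/* Fall back to 32-bit DMA, which the default check accepts. */
	if (pci_set_dma_mask(pdev, 0xffffffffULL) == 0)
		return 0;

	printk(KERN_ERR "example: no usable DMA configuration\n");
	return -EIO;
}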
......@@ -167,8 +184,6 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
#define pci_dac_dma_supported(pci_dev, mask) (0)
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
......
/*
* pci_dma.h
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _PCI_DMA_H
#define _PCI_DMA_H
#include <asm/types.h>
#include <linux/spinlock.h>
/*
* NUM_TCE_LEVELS defines the largest contiguous block
* of dma (tce) space we can get. NUM_TCE_LEVELS = 10
* allows up to 2**9 pages (512 * 4096) = 2 MB
*/
#define NUM_TCE_LEVELS 10
#define NO_TCE ((dma_addr_t)-1)
/*
* Tces come in two formats, one for the virtual bus and a different
* format for PCI
*/
#define TCE_VB 0
#define TCE_PCI 1
union Tce {
u64 wholeTce;
struct {
u64 cacheBits :6; /* Cache hash bits - not used */
u64 rsvd :6;
u64 rpn :40; /* Absolute page number */
u64 valid :1; /* Tce is valid (vb only) */
u64 allIo :1; /* Tce is valid for all lps (vb only) */
u64 lpIndex :8; /* LpIndex for user of TCE (vb only) */
u64 pciWrite :1; /* Write allowed (pci only) */
u64 readWrite :1; /* Read allowed (pci), Write allowed (vb) */
} tceBits;
};
struct Bitmap {
unsigned long numBits;
unsigned long numBytes;
unsigned char * map;
};
struct MultiLevelBitmap {
unsigned long maxLevel;
struct Bitmap level[NUM_TCE_LEVELS];
};
struct TceTable {
u64 busNumber;
u64 size;
u64 startOffset;
u64 base; /* pSeries native only */
u64 index;
u64 tceType;
spinlock_t lock;
struct MultiLevelBitmap mlbm;
};
struct TceTableManagerCB {
u64 busNumber; /* Bus number for this tce table */
u64 start; /* Will be NULL for secondary */
u64 totalSize; /* Size (in pages) of whole table */
u64 startOffset; /* Index into real tce table of the
start of our section */
u64 size; /* Size (in pages) of our section */
u64 index; /* Index of this tce table (token?) */
u16 maxTceTableIndex; /* Max num of tables for partition */
u8 virtualBusFlag; /* Flag to indicate virtual bus */
u8 logicalSlot; /* IOA Tce Slot Index */
u8 rsvd[4];
};
extern struct TceTable virtBusTceTable; /* Tce table for virtual bus */
extern void create_tce_tables(void);
extern void create_pci_bus_tce_table(unsigned long);
extern void tce_init_pSeries(void);
extern void tce_init_iSeries(void);
extern void pci_dma_init_direct(void);
#endif
......@@ -134,7 +134,7 @@ struct property {
* indication of a real PCI node. Other nodes leave these fields zeroed.
*/
struct pci_controller;
struct TceTable;
struct iommu_table;
struct device_node {
char *name;
char *type;
......@@ -155,7 +155,7 @@ struct device_node {
int eeh_mode; /* See eeh.h for possible EEH_MODEs */
int eeh_config_addr;
struct pci_controller *phb; /* for pci devices */
struct TceTable *tce_table; /* for phb's or bridges */
struct iommu_table *iommu_table; /* for phb's or bridges */
struct property *properties;
struct device_node *parent;
......
......@@ -38,7 +38,7 @@
struct vio_dev;
struct vio_driver;
struct vio_device_id;
struct TceTable;
struct iommu_table;
int vio_register_driver(struct vio_driver *drv);
void vio_unregister_driver(struct vio_driver *drv);
......@@ -48,7 +48,7 @@ struct vio_dev * __devinit vio_register_device(struct device_node *node_vdev);
void __devinit vio_unregister_device(struct vio_dev *dev);
const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length);
int vio_get_irq(struct vio_dev *dev);
struct TceTable * vio_build_tce_table(struct vio_dev *dev);
struct iommu_table * vio_build_iommu_table(struct vio_dev *dev);
int vio_enable_interrupts(struct vio_dev *dev);
int vio_disable_interrupts(struct vio_dev *dev);
......@@ -95,7 +95,7 @@ struct vio_dev {
struct device_node *archdata; /* Open Firmware node */
void *driver_data; /* data private to the driver */
unsigned long unit_address;
struct TceTable *tce_table; /* vio_map_* uses this */
struct iommu_table *iommu_table; /* vio_map_* uses this */
unsigned int irq;
struct device dev;
......