Commit 254b6b89 authored by Andi Kleen, committed by Linus Torvalds

[PATCH] x86-64 IOMMU & PCI updates

Update for the x86-64 PCI subsystem in 2.5.42. The main new feature is PCI
IOMMU support through the K8 aperture. This allows using more than 4GB
of memory with 32-bit PCI devices. Also included are some other PCI changes,
mostly merges from i386.
parent 0e97e2a2
/*
* Firmware replacement code.
*
* Work around broken BIOSes that don't set an aperture.
* The IOMMU code needs an aperture even when no AGP is present in the system.
* Map the aperture over some low memory. This is cheaper than doing bounce
* buffering. The memory is lost. This is done at early boot because only
* the bootmem allocator can allocate 32+MB.
*
* Copyright 2002 Andi Kleen, SuSE Labs.
* $Id: aperture.c,v 1.2 2002/09/19 19:25:32 ak Exp $
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/pci_ids.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/proto.h>
#include <asm/pci-direct.h>
int fallback_aper_order __initdata = 1; /* 64MB */
int fallback_aper_force __initdata = 0;
extern int no_iommu, force_mmu;
/* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */
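/* PCI ID dword of the K8 northbridge's Miscellaneous Control function
   (vendor 0x1022, device 0x1103), which shows up as function 3 of
   devices 24-31 on bus 0. */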
#define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16))
static u32 __init allocate_aperture(void)
{
#ifdef CONFIG_DISCONTIGMEM
pg_data_t *nd0 = NODE_DATA(0);
#else
pg_data_t *nd0 = &contig_page_data;
#endif
u32 aper_size;
void *p;
if (fallback_aper_order > 7)
fallback_aper_order = 7;
aper_size = (32 * 1024 * 1024) << fallback_aper_order;
/*
* The aperture has to be naturally aligned, it seems. This means a
* 2GB aperture won't have much chance of succeeding in the lower 4GB of
* memory. Unfortunately we cannot move it up because that would make
* the IOMMU useless.
*/
p = __alloc_bootmem_node(nd0, aper_size, aper_size, 0);
if (!p || __pa(p)+aper_size > 0xffffffff) {
printk("Cannot allocate aperture memory hole (%p,%uK)\n",
p, aper_size>>10);
if (p)
free_bootmem((unsigned long)p, aper_size);
return 0;
}
printk("Mapping aperture over %d KB of RAM @ %lx\n",
aper_size >> 10, __pa(p));
return (u32)__pa(p);
}
void __init iommu_hole_init(void)
{
int fix, num;
u32 aper_size, aper_alloc, aper_order;
u64 aper_base;
if (no_iommu)
return;
if (end_pfn < (0xffffffff>>PAGE_SHIFT) && !force_mmu)
return;
printk("Checking aperture...\n");
fix = 0;
for (num = 24; num < 32; num++) {
if (read_pci_config(0, num, 3, 0x00) != NB_ID_3)
continue;
aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7;
aper_size = (32 * 1024 * 1024) << aper_order;
aper_base = read_pci_config(0, num, 3, 0x94) & 0x7fff;
aper_base <<= 25;
printk("CPU %d: aperture @ %Lx size %u KB\n", num-24,
aper_base, aper_size>>10);
if (!aper_base || aper_base + aper_size >= 0xffffffff) {
fix = 1;
break;
}
if (e820_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
printk("Aperture pointing to e820 RAM. Ignoring.\n");
fix = 1;
break;
}
}
if (!fix && !fallback_aper_force)
return;
printk("Your BIOS is broken and doesn't leave a aperture memory hole\n");
aper_alloc = allocate_aperture();
if (!aper_alloc)
return;
for (num = 24; num < 32; num++) {
if (read_pci_config(0, num, 3, 0x00) != NB_ID_3)
continue;
/* Don't enable translation yet. That is done later.
Assume this BIOS didn't initialise the GART so
just overwrite all previous bits */
write_pci_config(0, num, 3, 0x90, fallback_aper_order<<1);
write_pci_config(0, num, 3, 0x94, aper_alloc>>25);
}
}
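Aside: a standalone sketch of the aperture register decoding used above and in pci-gart.c's read_aperture() (the order lives in bits 3:1 of register 0x90, the base in the low 15 bits of register 0x94, in 32MB units). The raw register values below are made up for illustration.

/* Standalone illustration of the K8 aperture decoding; hypothetical values. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t aperctl  = 0x0000000b;  /* hypothetical contents of reg 0x90 */
	uint32_t aperbase = 0x00000040;  /* hypothetical contents of reg 0x94 */

	unsigned order = (aperctl >> 1) & 7;                  /* 0..7 */
	uint64_t size  = (32ULL * 1024 * 1024) << order;      /* 32MB << order */
	uint64_t base  = (uint64_t)(aperbase & 0x7fff) << 25; /* 32MB units */

	printf("aperture base %#llx, size %llu MB\n",
	       (unsigned long long)base, (unsigned long long)(size >> 20));
	return 0;
}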
/*
- * Dynamic DMA mapping support.
+ * Dynamic DMA mapping support. Common code
 */
#include <linux/types.h>
@@ -8,24 +8,63 @@
#include <linux/pci.h>
#include <asm/io.h>
-void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
-dma_addr_t *dma_handle)
-{
-void *ret;
-int gfp = GFP_ATOMIC;
-gfp |= GFP_DMA;
-ret = (void *)__get_free_pages(gfp, get_order(size));
-if (ret != NULL) {
-memset(ret, 0, size);
-*dma_handle = virt_to_phys(ret);
-}
-return ret;
-}
-void pci_free_consistent(struct pci_dev *hwdev, size_t size,
-void *vaddr, dma_addr_t dma_handle)
-{
-free_pages((unsigned long)vaddr, get_order(size));
-}
+dma_addr_t bad_dma_address = -1UL;
+/* Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scatter-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+int nents, int direction)
+{
+int i;
+BUG_ON(direction == PCI_DMA_NONE);
+for (i = 0; i < nents; i++ ) {
+struct scatterlist *s = &sg[i];
+if (s->page) {
+s->dma_address = pci_map_page(hwdev, s->page, s->offset,
+s->length, direction);
+} else
+BUG();
+if (unlikely(s->dma_address == bad_dma_address))
+goto error;
+}
+return nents;
+ error:
+pci_unmap_sg(hwdev, sg, i, direction);
+return 0;
+}
+/* Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+void pci_unmap_sg(struct pci_dev *dev, struct scatterlist *sg,
+int nents, int dir)
+{
+int i;
+for (i = 0; i < nents; i++) {
+struct scatterlist *s = &sg[i];
+BUG_ON(s->page == NULL);
+BUG_ON(s->dma_address == 0);
+pci_unmap_single(dev, s->dma_address, s->length, dir);
+}
+}
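Aside: a rough driver-side sketch of how the pci_map_sg()/pci_unmap_sg() pair above is meant to be used; the device, scatterlist and error handling below are illustrative assumptions, not part of the patch.

/* Illustrative only: map an already-built scatterlist for a transfer to
 * the device, program the hardware, then unmap. Assumes <linux/pci.h>. */
static int example_do_dma(struct pci_dev *pdev, struct scatterlist *sg, int nents)
{
	int i, mapped;

	mapped = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
	if (mapped == 0)
		return -ENOMEM;		/* IOMMU space exhausted */

	for (i = 0; i < mapped; i++) {
		/* hand sg_dma_address(&sg[i]) and sg_dma_len(&sg[i])
		   to the device here */
	}

	/* ... wait for the hardware to finish ... */

	pci_unmap_sg(pdev, sg, mapped, PCI_DMA_TODEVICE);
	return 0;
}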
/*
* Dynamic DMA mapping support for AMD Hammer.
*
* Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
* This allows using PCI devices that only support 32-bit addresses on systems
* with more than 4GB of memory.
*
* See Documentation/DMA-mapping.txt for the interface specification.
*
* Copyright 2002 Andi Kleen, SuSE Labs.
* $Id: pci-gart.c,v 1.12 2002/09/19 19:25:32 ak Exp $
*/
/*
* Notebook:
agpgart_be
check if the simple reservation scheme is enough.
possible future tuning:
fast path for sg streaming mappings
more intelligent flush strategy - flush only a single NB?
move boundary between IOMMU and AGP in GART dynamically
could use exact fit in the gart in alloc_consistent, not order of two.
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
unsigned long iommu_bus_base; /* GART remapping area (physical) */
static unsigned long iommu_size; /* size of remapping area bytes */
static unsigned long iommu_pages; /* .. and in pages */
u32 *iommu_gatt_base; /* Remapping table */
int no_iommu;
static int no_agp;
int force_mmu = 1;
extern int fallback_aper_order;
extern int fallback_aper_force;
/* Allocation bitmap for the remapping area */
static spinlock_t iommu_bitmap_lock = SPIN_LOCK_UNLOCKED;
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
#define GPTE_MASK 0xfffffff000
#define GPTE_VALID 1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x,flag) (((x) & 0xfffffff0) | ((x) >> 28) | GPTE_VALID | (flag))
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((x) & 0xff0) << 28))
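Aside: a standalone sketch of the GART PTE layout these macros deal with (physical bits 31:12 in PTE bits 31:12, physical bits 39:32 folded into PTE bits 11:4, coherent and valid flags in bits 1:0). The encode helper below is written to be the inverse of GPTE_DECODE and is an illustration, not a copy of GPTE_ENCODE.

/* Illustration of the GART PTE layout; not the kernel macros verbatim. */
#include <stdio.h>
#include <stdint.h>

static uint32_t example_encode(uint64_t phys)   /* phys: page aligned, < 2^40 */
{
	return (uint32_t)(phys & 0xfffff000) |
	       (uint32_t)((phys >> 32) << 4) |
	       2 /* coherent */ | 1 /* valid */;
}

static uint64_t example_decode(uint32_t pte)    /* mirrors GPTE_DECODE above */
{
	return (pte & 0xfffff000) | (((uint64_t)pte & 0xff0) << 28);
}

int main(void)
{
	uint64_t phys = 0x3456789000ULL;        /* 40-bit, page-aligned address */
	uint32_t pte = example_encode(phys);

	printf("phys %#llx -> pte %#x -> phys %#llx\n",
	       (unsigned long long)phys, (unsigned)pte,
	       (unsigned long long)example_decode(pte));
	return 0;
}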
#define for_all_nb(dev) \
pci_for_each_dev(dev) \
if (dev->bus->number == 0 && PCI_FUNC(dev->devfn) == 3 && \
(PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))
#define EMERGENCY_PAGES 32 /* = 128KB */
#ifdef CONFIG_AGP
extern int agp_init(void);
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif
/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit; /* protected by iommu_bitmap_lock */
static unsigned long alloc_iommu(int size)
{
unsigned long offset, flags;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
if (offset == -1)
offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size);
if (offset != -1) {
set_bit_string(iommu_gart_bitmap, offset, size);
next_bit = offset+size;
if (next_bit >= iommu_pages)
next_bit = 0;
}
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
return offset;
}
static void free_iommu(unsigned long offset, int size)
{
unsigned long flags;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
clear_bit_string(iommu_gart_bitmap, offset, size);
next_bit = offset;
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
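Aside: find_next_zero_string()/set_bit_string() are x86-64 bitmap helpers; a rough userspace model of the semantics alloc_iommu() relies on (find a run of clear bits, then mark it busy) is sketched below. The function name and the byte-per-bit representation are assumptions for clarity only.

/* Userspace model of the allocation bitmap used by alloc_iommu(). */
#include <stdio.h>

#define NBITS 64
static unsigned char bits[NBITS];	/* one byte per bit, for clarity */

static long find_zero_run(long start, long end, long size)
{
	long i, run = 0;
	for (i = start; i < end; i++) {
		run = bits[i] ? 0 : run + 1;
		if (run == size)
			return i - size + 1;	/* first bit of the run */
	}
	return -1;
}

int main(void)
{
	long off, i;

	bits[2] = bits[3] = 1;			/* pretend pages 2-3 are busy */
	off = find_zero_run(0, NBITS, 4);	/* want 4 contiguous pages */
	if (off >= 0) {
		for (i = 0; i < 4; i++)
			bits[off + i] = 1;	/* "set_bit_string" */
		printf("allocated pages %ld-%ld\n", off, off + 3);
	}
	return 0;
}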
static inline void flush_gart(void)
{
struct pci_dev *nb;
for_all_nb(nb) {
u32 flag;
pci_read_config_dword(nb, 0x9c, &flag); /* could cache this */
/* could complain for PTE walk errors here (bit 1 of flag) */
flag |= 1;
pci_write_config_dword(nb, 0x9c, flag);
}
}
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
void *memory;
int gfp = GFP_ATOMIC;
int order, i;
unsigned long iommu_page;
if (hwdev == NULL || hwdev->dma_mask < 0xffffffff || no_iommu)
gfp |= GFP_DMA;
/*
* First try to allocate a contiguous block and use it directly if it is
* already in lowmem.
*/
order = get_order(size);
memory = (void *)__get_free_pages(gfp, order);
if (memory == NULL) {
return NULL;
} else {
int high = (unsigned long)virt_to_bus(memory) + size
>= 0xffffffff;
int mmu = high;
if (force_mmu)
mmu = 1;
if (no_iommu) {
if (high) goto error;
mmu = 0;
}
memset(memory, 0, size);
if (!mmu) {
*dma_handle = virt_to_bus(memory);
return memory;
}
}
iommu_page = alloc_iommu(1<<order);
if (iommu_page == -1)
goto error;
/* Fill in the GATT, allocating pages as needed. */
for (i = 0; i < 1<<order; i++) {
unsigned long phys_mem;
void *mem = memory + i*PAGE_SIZE;
if (i > 0)
atomic_inc(&virt_to_page(mem)->count);
phys_mem = virt_to_phys(mem);
BUG_ON(phys_mem & ~PTE_MASK);
iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem,GPTE_COHERENT);
}
flush_gart();
*dma_handle = iommu_bus_base + (iommu_page << PAGE_SHIFT);
return memory;
error:
free_pages((unsigned long)memory, order);
return NULL;
}
/*
* Unmap consistent memory.
* The caller must ensure that the device has finished accessing the mapping.
*/
void pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t bus)
{
u64 pte;
int order = get_order(size);
unsigned long iommu_page;
int i;
if (bus < iommu_bus_base || bus > iommu_bus_base + iommu_size) {
free_pages((unsigned long)vaddr, order);
return;
}
iommu_page = (bus - iommu_bus_base) / PAGE_SIZE;
for (i = 0; i < 1<<order; i++) {
pte = iommu_gatt_base[iommu_page + i];
BUG_ON((pte & GPTE_VALID) == 0);
iommu_gatt_base[iommu_page + i] = 0;
free_page((unsigned long) __va(GPTE_DECODE(pte)));
}
flush_gart();
free_iommu(iommu_page, 1<<order);
}
#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_dumppages = 20;
void dump_leak(void)
{
int i;
static int dump;
if (dump || !iommu_leak_tab) return;
dump = 1;
show_stack(NULL);
printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_dumppages);
for (i = 0; i < iommu_leak_dumppages; i++)
printk("[%lu: %lx] ",
iommu_pages-i,(unsigned long) iommu_leak_tab[iommu_pages-i]);
printk("\n");
}
#endif
static void iommu_full(struct pci_dev *dev, void *addr, size_t size, int dir)
{
/*
* Ran out of IOMMU space for this operation. This is very bad.
* Unfortunately the drivers cannot handle this operation properly.
* Return some non mapped prereserved space in the aperture and
* let the Northbridge deal with it. This will result in garbage
* in the IO operation. When the size exceeds the prereserved space
* memory corruption will occur or random memory will be DMAed
* out. Hopefully no network devices use single mappings that big.
*/
printk(KERN_ERR
"PCI-DMA: Error: ran out out IOMMU space for %p size %lu at device %s[%s]\n",
addr,size, dev ? dev->name : "?", dev ? dev->slot_name : "?");
if (size > PAGE_SIZE*EMERGENCY_PAGES) {
if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
panic("PCI-DMA: Memory will be corrupted\n");
if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
panic("PCI-DMA: Random memory will be DMAed\n");
}
#ifdef CONFIG_IOMMU_LEAK
dump_leak();
#endif
}
static inline int need_iommu(struct pci_dev *dev, unsigned long addr, size_t size)
{
u64 mask = dev ? dev->dma_mask : 0xffffffff;
int high = (~mask & (unsigned long)(addr + size)) != 0;
int mmu = high;
if (force_mmu)
mmu = 1;
if (no_iommu) {
if (high)
panic("pci_map_single: high address but no IOMMU.\n");
mmu = 0;
}
return mmu;
}
dma_addr_t pci_map_single(struct pci_dev *dev, void *addr, size_t size,int dir)
{
unsigned long iommu_page;
unsigned long phys_mem, bus;
int i, npages;
BUG_ON(dir == PCI_DMA_NONE);
phys_mem = virt_to_phys(addr);
if (!need_iommu(dev, phys_mem, size))
return phys_mem;
npages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
iommu_page = alloc_iommu(npages);
if (iommu_page == -1) {
iommu_full(dev, addr, size, dir);
return iommu_bus_base;
}
phys_mem &= PAGE_MASK;
for (i = 0; i < npages; i++, phys_mem += PAGE_SIZE) {
BUG_ON(phys_mem & ~PTE_MASK);
/*
* Set coherent mapping here to avoid needing to flush
* the caches on mapping.
*/
iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem, GPTE_COHERENT);
#ifdef CONFIG_IOMMU_LEAK
/* XXX need eventually caller of pci_map_sg */
if (iommu_leak_tab)
iommu_leak_tab[iommu_page + i] = __builtin_return_address(0);
#endif
}
flush_gart();
bus = iommu_bus_base + iommu_page*PAGE_SIZE;
return bus + ((unsigned long)addr & ~PAGE_MASK);
}
/*
* Free a temporary PCI mapping.
*/
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
{
unsigned long iommu_page;
int i, npages;
if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
dma_addr > iommu_bus_base + iommu_size)
return;
iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
npages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
iommu_gatt_base[iommu_page + i] = 0;
#ifdef CONFIG_IOMMU_LEAK
if (iommu_leak_tab)
iommu_leak_tab[iommu_page + i] = 0;
#endif
}
flush_gart();
free_iommu(iommu_page, npages);
}
EXPORT_SYMBOL(pci_map_single);
EXPORT_SYMBOL(pci_unmap_single);
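Aside: a minimal, hedged sketch of the streaming single-mapping API exported above, as a driver would call it; the buffer, length and device below are placeholders, not part of the patch.

/* Illustrative driver fragment: stream a buffer to a device through
 * pci_map_single()/pci_unmap_single(). With the GART enabled the returned
 * bus address is remapped below 4GB when needed. Assumes <linux/pci.h>. */
static void example_tx(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	/* ... hand 'bus' and 'len' to the device, wait for the DMA ... */

	pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);
}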
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
unsigned long a;
if (!iommu_size) {
iommu_size = aper_size;
if (!no_agp)
iommu_size /= 2;
}
a = aper + iommu_size;
iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;
if (iommu_size < 64*1024*1024)
printk(KERN_WARNING
"PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",iommu_size>>20);
return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
unsigned aper_size = 0, aper_base_32;
u64 aper_base;
unsigned aper_order;
pci_read_config_dword(dev, 0x94, &aper_base_32);
pci_read_config_dword(dev, 0x90, &aper_order);
aper_order = (aper_order >> 1) & 7;
aper_base = aper_base_32 & 0x7fff;
aper_base <<= 25;
aper_size = (32 * 1024 * 1024) << aper_order;
if (aper_base + aper_size >= 0xffffffff || !aper_size)
aper_base = 0;
*size = aper_size;
return aper_base;
}
/*
* Private Northbridge GATT initialization in case we cannot use the
* AGP driver for some reason.
*/
static __init int init_k8_gatt(agp_kern_info *info)
{
struct pci_dev *dev;
void *gatt;
unsigned aper_base, new_aper_base;
unsigned aper_size, gatt_size, new_aper_size;
aper_size = aper_base = info->aper_size = 0;
for_all_nb(dev) {
new_aper_base = read_aperture(dev, &new_aper_size);
if (!new_aper_base)
goto nommu;
if (!aper_base) {
aper_size = new_aper_size;
aper_base = new_aper_base;
}
if (aper_size != new_aper_size || aper_base != new_aper_base)
goto nommu;
}
if (!aper_base)
goto nommu;
info->aper_base = aper_base;
info->aper_size = aper_size>>20;
gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
if (!gatt)
panic("Cannot allocate GATT table");
memset(gatt, 0, gatt_size);
change_page_attr(virt_to_page(gatt), gatt_size/PAGE_SIZE, PAGE_KERNEL_NOCACHE);
agp_gatt_table = gatt;
for_all_nb(dev) {
u32 ctl;
u32 gatt_reg;
gatt_reg = ((u64)gatt) >> 12;
gatt_reg <<= 4;
pci_write_config_dword(dev, 0x98, gatt_reg);
pci_read_config_dword(dev, 0x90, &ctl);
ctl |= 1;
ctl &= ~((1<<4) | (1<<5));
pci_write_config_dword(dev, 0x90, ctl);
}
flush_gart();
printk("PCI-DMA: aperture base @ %x size %u KB\n", aper_base, aper_size>>10);
return 0;
nommu:
/* XXX: reject 0xffffffff mask now in pci mapping functions */
printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.");
return -1;
}
void __init pci_iommu_init(void)
{
agp_kern_info info;
unsigned long aper_size;
unsigned long iommu_start;
#ifndef CONFIG_AGP
no_agp = 1;
#else
no_agp = no_agp || (agp_init() < 0) || (agp_copy_info(&info) < 0);
#endif
if (no_iommu || (!force_mmu && end_pfn < 0xffffffff>>PAGE_SHIFT)) {
printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
no_iommu = 1;
return;
}
if (no_agp) {
int err = -1;
printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
no_agp = 1;
if (force_mmu || end_pfn >= 0xffffffff>>PAGE_SHIFT)
err = init_k8_gatt(&info);
if (err < 0) {
printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
no_iommu = 1;
return;
}
}
aper_size = info.aper_size * 1024 * 1024;
iommu_size = check_iommu_size(info.aper_base, aper_size);
iommu_pages = iommu_size >> PAGE_SHIFT;
iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL,
get_order(iommu_pages/8));
if (!iommu_gart_bitmap)
panic("Cannot allocate iommu bitmap\n");
memset(iommu_gart_bitmap, 0, iommu_pages/8);
#ifdef CONFIG_IOMMU_LEAK
if (leak_trace) {
iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
get_order(iommu_pages*sizeof(void *)));
if (iommu_leak_tab)
memset(iommu_leak_tab, 0, iommu_pages * 8);
else
printk("PCI-DMA: Cannot allocate leak trace area\n");
}
#endif
/*
* Out of IOMMU space handling.
* Reserve some invalid pages at the beginning of the GART.
*/
set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
agp_memory_reserved = iommu_size;
printk(KERN_INFO"PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
iommu_size>>20);
iommu_start = aper_size - iommu_size;
iommu_bus_base = info.aper_base + iommu_start;
iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
bad_dma_address = iommu_bus_base;
asm volatile("wbinvd" ::: "memory");
}
/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]]
size set size of IOMMU (in bytes)
noagp don't initialize the AGP driver and use the full aperture.
off don't use the IOMMU
force force IOMMU usage even when it is not strictly needed
noforce don't force IOMMU usage
leak turn on simple IOMMU leak tracing (only when CONFIG_IOMMU_LEAK is on)
memaper[=order] allocate our own aperture over RAM with size 32MB<<order.
*/
__init int iommu_setup(char *opt)
{
int arg;
char *p = opt;
for (;;) {
if (!memcmp(p,"noagp", 5))
no_agp = 1;
if (!memcmp(p,"off", 3))
no_iommu = 1;
if (!memcmp(p,"force", 5))
force_mmu = 1;
if (!memcmp(p,"noforce", 7))
force_mmu = 0;
if (!memcmp(p, "memaper", 7)) {
fallback_aper_force = 1;
p += 7;
if (*p == '=' && get_option(&p, &arg))
fallback_aper_order = arg;
}
#ifdef CONFIG_IOMMU_LEAK
if (!memcmp(p,"leak", 4))
leak_trace = 1;
#endif
if (isdigit(*p) && get_option(&p, &arg))
iommu_size = arg;
do {
if (*p == ' ' || *p == 0)
return 0;
} while (*p++ != ',');
}
return 1;
}
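Usage note (an assumption about a typical invocation, not part of the patch): booting with a kernel command line containing

iommu=force,noagp

forces the GART IOMMU on even on machines with less than 4GB and keeps the AGP driver from claiming the aperture, while "iommu=off" disables the IOMMU entirely.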
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
/*
* Dummy IO MMU functions
*/
extern unsigned long end_pfn;
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
void *ret;
int gfp = GFP_ATOMIC;
if (hwdev == NULL ||
end_pfn > (hwdev->dma_mask>>PAGE_SHIFT) || /* XXX */
(u32)hwdev->dma_mask < 0xffffffff)
gfp |= GFP_DMA;
ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
*dma_handle = virt_to_bus(ret);
}
return ret;
}
void pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
free_pages((unsigned long)vaddr, get_order(size));
}
static void __init check_ram(void)
{
if (end_pfn >= 0xffffffff>>PAGE_SHIFT) {
printk(KERN_ERR "WARNING more than 4GB of memory but no IOMMU.\n"
KERN_ERR "WARNING 32bit PCI may malfunction.\n");
/* Could play with highmem_start_page here to trick some subsystems
into bounce buffers. Unfortunately that would require setting
CONFIG_HIGHMEM too.
*/
}
}
O_TARGET := pci.o
obj-y := x86-64.o
......
@@ -133,6 +133,10 @@ static int __init pcibios_init(void)
pcibios_resource_survey();
#ifdef CONFIG_GART_IOMMU
pci_iommu_init();
#endif
/* may eventually need to do ACPI sort here. */
return 0;
}
@@ -185,11 +189,11 @@ unsigned int pcibios_assign_all_busses(void)
return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
}
-int pcibios_enable_device(struct pci_dev *dev)
+int pcibios_enable_device(struct pci_dev *dev, int mask)
{
int err;
-if ((err = pcibios_enable_resources(dev)) < 0)
+if ((err = pcibios_enable_resources(dev, mask)) < 0)
return err;
return pcibios_enable_irq(dev);
......
@@ -41,19 +41,6 @@ static void __devinit pci_fixup_ide_bases(struct pci_dev *d)
}
}
static void __devinit pci_fixup_ide_trash(struct pci_dev *d)
{
int i;
/*
* There exist PCI IDE controllers which have utter garbage
* in first four base registers. Ignore that.
*/
DBG("PCI: IDE base address trash cleared for %s\n", d->slot_name);
for(i=0; i<4; i++)
d->resource[i].start = d->resource[i].end = d->resource[i].flags = 0;
}
struct pci_fixup pcibios_fixups[] = {
{ PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, pci_fixup_ncr53c810 },
......
@@ -29,7 +29,7 @@ extern unsigned int pci_probe;
extern unsigned int pcibios_max_latency;
void pcibios_resource_survey(void);
-int pcibios_enable_resources(struct pci_dev *);
+int pcibios_enable_resources(struct pci_dev *, int);
/* pci-pc.c */
......
@@ -243,7 +243,7 @@ void __init pcibios_resource_survey(void)
pcibios_assign_resources();
}
-int pcibios_enable_resources(struct pci_dev *dev)
+int pcibios_enable_resources(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
int idx;
@@ -252,6 +252,9 @@ int pcibios_enable_resources(struct pci_dev *dev)
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
for(idx=0; idx<6; idx++) {
+if (!(mask & (1<<idx)))
+continue;
r = &dev->resource[idx];
if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", dev->slot_name);
......
#ifndef ASM_PCI_DIRECT_H
#define ASM_PCI_DIRECT_H 1
#include <linux/types.h>
#include <asm/io.h>
/* Direct PCI access. This is used for PCI accesses in early boot before
the PCI subsystem works. */
#define PDprintk(x...)
static inline u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
{
u32 v;
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
v = inl(0xcfc);
PDprintk("%x reading from %x: %x\n", slot, offset, v);
return v;
}
static inline void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
u32 val)
{
PDprintk("%x writing to %x: %x\n", slot, offset, val);
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
outl(val, 0xcfc);
}
#endif
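Aside: the dword written to port 0xCF8 is the standard PCI configuration mechanism #1 address; a standalone sketch of its composition for the northbridge probe done in aperture.c (bus 0, device 24, function 3) follows.

/* Standalone illustration of the config-mechanism-#1 address word used by
 * read_pci_config(); values chosen to match the northbridge probe above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned bus = 0, slot = 24, func = 3, offset = 0x00;
	uint32_t addr = 0x80000000u | (bus << 16) | (slot << 11) |
			(func << 8) | offset;

	/* 0x80000000 = enable bit, 24 << 11 = 0xc000, 3 << 8 = 0x300 */
	printf("CONFIG_ADDRESS word: %#x\n", addr);	/* prints 0x8000c300 */
	return 0;
}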
@@ -2,9 +2,15 @@
#define __x8664_PCI_H
#include <linux/config.h>
#include <asm/io.h>
#ifdef __KERNEL__
#include <linux/mm.h> /* for struct page */
extern dma_addr_t bad_dma_address;
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
@@ -23,6 +29,7 @@ void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int bus);
extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
void pcibios_set_master(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq);
struct irq_routing_table *pcibios_get_irq_routing_table(void);
@@ -30,19 +37,16 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/page.h>
struct pci_dev;
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (1)
extern int iommu_setup(char *opt);
extern void pci_iommu_init(void);
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices,
@@ -65,55 +69,95 @@ extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle);
#ifdef CONFIG_GART_IOMMU
/* Map a single buffer of the indicated size for DMA in streaming mode.
* The 32-bit bus address to use is returned.
*
* Once the device is given the dma address, the device owns this memory
* until either pci_unmap_single or pci_dma_sync_single is performed.
*/
extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
size_t size, int direction);
extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t addr,
size_t size, int direction);
/*
* pci_{map,unmap}_single_page maps a kernel page to a dma_addr_t. identical
* to pci_map_single, but takes a struct page instead of a virtual address
*/
#define pci_map_page(dev,page,offset,size,dir) \
pci_map_single((dev), page_address(page)+(offset), (size), (dir))
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME) \
((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME) \
((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
static inline void pci_dma_sync_single(struct pci_dev *hwdev,
dma_addr_t dma_handle,
size_t size, int direction)
{
BUG_ON(direction == PCI_DMA_NONE);
}
static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
struct scatterlist *sg,
int nelems, int direction)
{
BUG_ON(direction == PCI_DMA_NONE);
}
#define PCI_DMA_BUS_IS_PHYS 0
#else
static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
size_t size, int direction)
{
+dma_addr_t addr;
if (direction == PCI_DMA_NONE)
-BUG();
-flush_write_buffers();
-return virt_to_phys(ptr);
-}
+out_of_line_bug();
+addr = virt_to_bus(ptr);
-/* Unmap a single streaming mode DMA translation. The dma_addr and size
- * must match what was provided for in a previous pci_map_single call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
+/*
+ * This is gross, but what should I do.
+ * Unfortunately drivers do not test the return value of this.
+ */
+if ((addr+size) & ~hwdev->dma_mask)
+out_of_line_bug();
+return addr;
+}
static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
{
if (direction == PCI_DMA_NONE)
-BUG();
+out_of_line_bug();
/* Nothing to do */
}
/*
* pci_{map,unmap}_single_page maps a kernel page to a dma_addr_t. identical
* to pci_map_single, but takes a struct page instead of a virtual address
*/
static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
unsigned long offset, size_t size, int direction)
{
+dma_addr_t addr;
if (direction == PCI_DMA_NONE)
-BUG();
-return (page - mem_map) * PAGE_SIZE + offset;
-}
-static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
-size_t size, int direction)
-{
-if (direction == PCI_DMA_NONE)
-BUG();
-/* Nothing to do */
+out_of_line_bug();
+addr = page_to_pfn(page) * PAGE_SIZE + offset;
+if ((addr+size) & ~hwdev->dma_mask)
+out_of_line_bug();
+return addr;
}
/* pci_unmap_{page,single} is a nop so... */
@@ -124,52 +168,6 @@ static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter gather list
* elements are each tagged with the appropriate dma address
* and length. They are obtained via sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
* (for example via virtual mapping capabilities)
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
int i;
if (direction == PCI_DMA_NONE)
BUG();
for (i = 0; i < nents; i++ ) {
if (!sg[i].page)
BUG();
sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
}
flush_write_buffers();
return nents;
}
/* Unmap a set of streaming mode DMA translations.
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
if (direction == PCI_DMA_NONE)
BUG();
/* Nothing to do */
}
/* Make physical memory consistent for a single
* streaming mode DMA translation after a transfer.
*
@@ -184,7 +182,7 @@ static inline void pci_dma_sync_single(struct pci_dev *hwdev,
size_t size, int direction)
{
if (direction == PCI_DMA_NONE)
-BUG();
+out_of_line_bug();
flush_write_buffers();
}
@@ -199,10 +197,22 @@ static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
int nelems, int direction)
{
if (direction == PCI_DMA_NONE)
-BUG();
+out_of_line_bug();
flush_write_buffers();
}
#define PCI_DMA_BUS_IS_PHYS 1
#endif
extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction);
extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction);
#define pci_unmap_page pci_unmap_single
/* Return whether the given PCI device DMA address mask can
* be supported properly. For example, if your device can
* only drive the low 24-bits during PCI bus mastering, then
@@ -234,9 +244,7 @@ pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offse
static __inline__ struct page *
pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
-unsigned long poff = (dma_addr >> PAGE_SHIFT);
-return mem_map + poff;
+return virt_to_page(__va(dma_addr));
}
static __inline__ unsigned long
......