Commit 6c763eb9 authored by Linus Torvalds's avatar Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/gregkh/pci-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/gregkh/pci-2.6: (27 commits)
  [PATCH] PCI: nVidia quirk to make AER PCI-E extended capability visible
  [PATCH] PCI: fix issues with extended conf space when MMCONFIG disabled because of e820
  [PATCH] PCI: Bus Parity Status sysfs interface
  [PATCH] PCI: fix memory leak in MMCONFIG error path
  [PATCH] PCI: fix error with pci_get_device() call in the mpc85xx driver
  [PATCH] PCI: MSI-K8T-Neo2-Fir: run only where needed
  [PATCH] PCI: fix race with pci_walk_bus and pci_destroy_dev
  [PATCH] PCI: clean up pci documentation to be more specific
  [PATCH] PCI: remove unneeded msi code
  [PATCH] PCI: don't move ioapics below PCI bridge
  [PATCH] PCI: cleanup unused variable about msi driver
  [PATCH] PCI: disable msi mode in pci_disable_device
  [PATCH] PCI: Allow MSI to work on kexec kernel
  [PATCH] PCI: AMD 8131 MSI quirk called too late, bus_flags not inherited ?
  [PATCH] PCI: Move various PCI IDs to header file
  [PATCH] PCI Bus Parity Status-broken hardware attribute, EDAC foundation
  [PATCH] PCI: i386/x86_64: disable PCI resource decode on device disable
  [PATCH] PCI ACPI: Rename the functions to avoid multiple instances.
  [PATCH] PCI: don't enable device if already enabled
  [PATCH] PCI: Add an "enable" sysfs attribute to the pci devices to allow userspace (Xorg) to enable devices without doing foul direct access
  ...
parents dcc1a66a cf34a8e0
...@@ -213,9 +213,17 @@ have been remapped by the kernel. ...@@ -213,9 +213,17 @@ have been remapped by the kernel.
See Documentation/IO-mapping.txt for how to access device memory. See Documentation/IO-mapping.txt for how to access device memory.
You still need to call request_region() for I/O regions and The device driver needs to call pci_request_region() to make sure
request_mem_region() for memory regions to make sure nobody else is using the no other device is already using the same resource. The driver is expected
same device. to determine MMIO and IO Port resource availability _before_ calling
pci_enable_device(). Conversely, drivers should call pci_release_region()
_after_ calling pci_disable_device(). The idea is to prevent two devices
colliding on the same address range.
Generic flavors of pci_request_region() are request_mem_region()
(for MMIO ranges) and request_region() (for IO Port ranges).
Use these for address resources that are not described by "normal" PCI
interfaces (e.g. BAR).
All interrupt handlers should be registered with SA_SHIRQ and use the devid All interrupt handlers should be registered with SA_SHIRQ and use the devid
to map IRQs to devices (remember that all PCI interrupts are shared). to map IRQs to devices (remember that all PCI interrupts are shared).
......
...@@ -202,6 +202,8 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size) ...@@ -202,6 +202,8 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
if (mcfg->config[i].base_reserved) { if (mcfg->config[i].base_reserved) {
printk(KERN_ERR PREFIX printk(KERN_ERR PREFIX
"MMCONFIG not in low 4GB of memory\n"); "MMCONFIG not in low 4GB of memory\n");
kfree(pci_mmcfg_config);
pci_mmcfg_config_num = 0;
return -ENODEV; return -ENODEV;
} }
} }
......
...@@ -288,6 +288,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) ...@@ -288,6 +288,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
void pcibios_disable_device (struct pci_dev *dev) void pcibios_disable_device (struct pci_dev *dev)
{ {
pcibios_disable_resources(dev);
if (pcibios_disable_irq) if (pcibios_disable_irq)
pcibios_disable_irq(dev); pcibios_disable_irq(dev);
} }
...@@ -242,6 +242,15 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask) ...@@ -242,6 +242,15 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
return 0; return 0;
} }
/*
 * Disable address decoding for @dev: clear the I/O and memory space
 * enable bits in the device's PCI command register so the device stops
 * claiming bus cycles for its resources.
 */
void pcibios_disable_resources(struct pci_dev *dev)
{
	u16 command;

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	pci_write_config_word(dev, PCI_COMMAND, command);
}
/* /*
* If we set up a device for bus mastering, we need to check the latency * If we set up a device for bus mastering, we need to check the latency
* timer as certain crappy BIOSes forget to set it properly. * timer as certain crappy BIOSes forget to set it properly.
......
...@@ -15,7 +15,9 @@ ...@@ -15,7 +15,9 @@
#include <asm/e820.h> #include <asm/e820.h>
#include "pci.h" #include "pci.h"
#define MMCONFIG_APER_SIZE (256*1024*1024) /* aperture is up to 256MB but BIOS may reserve less */
#define MMCONFIG_APER_MIN (2 * 1024*1024)
#define MMCONFIG_APER_MAX (256 * 1024*1024)
/* Assume systems with more busses have correct MCFG */ /* Assume systems with more busses have correct MCFG */
#define MAX_CHECK_BUS 16 #define MAX_CHECK_BUS 16
...@@ -197,9 +199,10 @@ void __init pci_mmcfg_init(void) ...@@ -197,9 +199,10 @@ void __init pci_mmcfg_init(void)
return; return;
if (!e820_all_mapped(pci_mmcfg_config[0].base_address, if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
pci_mmcfg_config[0].base_address + MMCONFIG_APER_SIZE, pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
E820_RESERVED)) { E820_RESERVED)) {
printk(KERN_ERR "PCI: BIOS Bug: MCFG area is not E820-reserved\n"); printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
pci_mmcfg_config[0].base_address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return; return;
} }
......
...@@ -35,6 +35,7 @@ extern unsigned int pcibios_max_latency; ...@@ -35,6 +35,7 @@ extern unsigned int pcibios_max_latency;
void pcibios_resource_survey(void); void pcibios_resource_survey(void);
int pcibios_enable_resources(struct pci_dev *, int); int pcibios_enable_resources(struct pci_dev *, int);
void pcibios_disable_resources(struct pci_dev *);
/* pci-pc.c */ /* pci-pc.c */
......
...@@ -46,6 +46,10 @@ ...@@ -46,6 +46,10 @@
#define IRQ_DEBUG 0 #define IRQ_DEBUG 0
/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
/* default base addr of IPI table */ /* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *) void __iomem *ipi_base_addr = ((void __iomem *)
(__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR)); (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
...@@ -60,7 +64,7 @@ __u8 isa_irq_to_vector_map[16] = { ...@@ -60,7 +64,7 @@ __u8 isa_irq_to_vector_map[16] = {
}; };
EXPORT_SYMBOL(isa_irq_to_vector_map); EXPORT_SYMBOL(isa_irq_to_vector_map);
static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)]; static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
int int
assign_irq_vector (int irq) assign_irq_vector (int irq)
...@@ -89,6 +93,19 @@ free_irq_vector (int vector) ...@@ -89,6 +93,19 @@ free_irq_vector (int vector)
printk(KERN_WARNING "%s: double free!\n", __FUNCTION__); printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
} }
/*
 * Mark @vector as allocated in the device-vector bitmap.
 *
 * Returns -EINVAL if @vector lies outside the device vector range,
 * otherwise the previous state of the bit (non-zero means the vector
 * was already reserved).
 */
int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;

	return test_and_set_bit(vector - IA64_FIRST_DEVICE_VECTOR,
				ia64_vector_mask);
}
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE) # define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
#else #else
......
...@@ -58,7 +58,7 @@ static int max_pcibus_number = 255; /* Default highest pci bus number */ ...@@ -58,7 +58,7 @@ static int max_pcibus_number = 255; /* Default highest pci bus number */
*/ */
static dma_addr_t static dma_addr_t
sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size) sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
{ {
return 0; return 0;
} }
...@@ -457,13 +457,6 @@ void sn_pci_fixup_slot(struct pci_dev *dev) ...@@ -457,13 +457,6 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
pcidev_info->pdi_sn_irq_info = NULL; pcidev_info->pdi_sn_irq_info = NULL;
kfree(sn_irq_info); kfree(sn_irq_info);
} }
/*
* MSI currently not supported on altix. Remove this when
* the MSI abstraction patches are integrated into the kernel
* (sometime after 2.6.16 releases)
*/
dev->no_msi = 1;
} }
/* /*
......
...@@ -26,11 +26,11 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); ...@@ -26,11 +26,11 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
int sn_force_interrupt_flag = 1; int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited; extern int sn_ioif_inited;
static struct list_head **sn_irq_lh; struct list_head **sn_irq_lh;
static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
u64 sn_irq_info, struct sn_irq_info *sn_irq_info,
int req_irq, nasid_t req_nasid, int req_irq, nasid_t req_nasid,
int req_slice) int req_slice)
{ {
...@@ -40,12 +40,13 @@ static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, ...@@ -40,12 +40,13 @@ static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
(u64) SAL_INTR_ALLOC, (u64) local_nasid, (u64) SAL_INTR_ALLOC, (u64) local_nasid,
(u64) local_widget, (u64) sn_irq_info, (u64) req_irq, (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
(u64) req_nasid, (u64) req_slice); (u64) req_nasid, (u64) req_slice);
return ret_stuff.status; return ret_stuff.status;
} }
static inline void sn_intr_free(nasid_t local_nasid, int local_widget, void sn_intr_free(nasid_t local_nasid, int local_widget,
struct sn_irq_info *sn_irq_info) struct sn_irq_info *sn_irq_info)
{ {
struct ia64_sal_retval ret_stuff; struct ia64_sal_retval ret_stuff;
...@@ -112,17 +113,12 @@ static void sn_end_irq(unsigned int irq) ...@@ -112,17 +113,12 @@ static void sn_end_irq(unsigned int irq)
static void sn_irq_info_free(struct rcu_head *head); static void sn_irq_info_free(struct rcu_head *head);
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
nasid_t nasid, int slice)
{ {
struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; int vector;
int cpuid, cpuphys; int cpuphys;
int64_t bridge;
cpuid = first_cpu(mask);
cpuphys = cpu_physical_id(cpuid);
list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
sn_irq_lh[irq], list) {
u64 bridge;
int local_widget, status; int local_widget, status;
nasid_t local_nasid; nasid_t local_nasid;
struct sn_irq_info *new_irq_info; struct sn_irq_info *new_irq_info;
...@@ -130,13 +126,14 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) ...@@ -130,13 +126,14 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
if (new_irq_info == NULL) if (new_irq_info == NULL)
break; return NULL;
memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
bridge = (u64) new_irq_info->irq_bridge; bridge = (u64) new_irq_info->irq_bridge;
if (!bridge) { if (!bridge) {
kfree(new_irq_info); kfree(new_irq_info);
break; /* irq is not a device interrupt */ return NULL; /* irq is not a device interrupt */
} }
local_nasid = NASID_GET(bridge); local_nasid = NASID_GET(bridge);
...@@ -146,6 +143,7 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) ...@@ -146,6 +143,7 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
else else
local_widget = SWIN_WIDGETNUM(bridge); local_widget = SWIN_WIDGETNUM(bridge);
vector = sn_irq_info->irq_irq;
/* Free the old PROM new_irq_info structure */ /* Free the old PROM new_irq_info structure */
sn_intr_free(local_nasid, local_widget, new_irq_info); sn_intr_free(local_nasid, local_widget, new_irq_info);
/* Update kernels new_irq_info with new target info */ /* Update kernels new_irq_info with new target info */
...@@ -153,21 +151,27 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) ...@@ -153,21 +151,27 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
/* allocate a new PROM new_irq_info struct */ /* allocate a new PROM new_irq_info struct */
status = sn_intr_alloc(local_nasid, local_widget, status = sn_intr_alloc(local_nasid, local_widget,
__pa(new_irq_info), irq, new_irq_info, vector,
cpuid_to_nasid(cpuid), nasid, slice);
cpuid_to_slice(cpuid));
/* SAL call failed */ /* SAL call failed */
if (status) { if (status) {
kfree(new_irq_info); kfree(new_irq_info);
break; return NULL;
} }
new_irq_info->irq_cpuid = cpuid; cpuphys = nasid_slice_to_cpuid(nasid, slice);
new_irq_info->irq_cpuid = cpuphys;
register_intr_pda(new_irq_info); register_intr_pda(new_irq_info);
pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
if (pci_provider && pci_provider->target_interrupt)
/*
* If this represents a line interrupt, target it. If it's
* an msi (irq_int_bit < 0), it's already targeted.
*/
if (new_irq_info->irq_int_bit >= 0 &&
pci_provider && pci_provider->target_interrupt)
(pci_provider->target_interrupt)(new_irq_info); (pci_provider->target_interrupt)(new_irq_info);
spin_lock(&sn_irq_info_lock); spin_lock(&sn_irq_info_lock);
...@@ -176,9 +180,24 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) ...@@ -176,9 +180,24 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
call_rcu(&sn_irq_info->rcu, sn_irq_info_free); call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
set_irq_affinity_info((irq & 0xff), cpuphys, 0); set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif #endif
}
return new_irq_info;
}
/*
 * Retarget every interrupt registered on @irq to the nasid/slice of
 * the first CPU in @mask.  Return values of sn_retarget_vector() are
 * intentionally ignored; a failed retarget leaves the old target.
 */
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct sn_irq_info *info, *next;
	int cpu = first_cpu(mask);
	nasid_t nasid = cpuid_to_nasid(cpu);
	int slice = cpuid_to_slice(cpu);

	list_for_each_entry_safe(info, next, sn_irq_lh[irq], list)
		(void)sn_retarget_vector(info, nasid, slice);
}
struct hw_interrupt_type irq_type_sn = { struct hw_interrupt_type irq_type_sn = {
...@@ -202,6 +221,9 @@ void sn_irq_init(void) ...@@ -202,6 +221,9 @@ void sn_irq_init(void)
int i; int i;
irq_desc_t *base_desc = irq_desc; irq_desc_t *base_desc = irq_desc;
ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
for (i = 0; i < NR_IRQS; i++) { for (i = 0; i < NR_IRQS; i++) {
if (base_desc[i].handler == &no_irq_type) { if (base_desc[i].handler == &no_irq_type) {
base_desc[i].handler = &irq_type_sn; base_desc[i].handler = &irq_type_sn;
...@@ -285,6 +307,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) ...@@ -285,6 +307,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
/* link it into the sn_irq[irq] list */ /* link it into the sn_irq[irq] list */
spin_lock(&sn_irq_info_lock); spin_lock(&sn_irq_info_lock);
list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]); list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
reserve_irq_vector(sn_irq_info->irq_irq);
spin_unlock(&sn_irq_info_lock); spin_unlock(&sn_irq_info_lock);
register_intr_pda(sn_irq_info); register_intr_pda(sn_irq_info);
...@@ -310,8 +333,11 @@ void sn_irq_unfixup(struct pci_dev *pci_dev) ...@@ -310,8 +333,11 @@ void sn_irq_unfixup(struct pci_dev *pci_dev)
spin_lock(&sn_irq_info_lock); spin_lock(&sn_irq_info_lock);
list_del_rcu(&sn_irq_info->list); list_del_rcu(&sn_irq_info->list);
spin_unlock(&sn_irq_info_lock); spin_unlock(&sn_irq_info_lock);
if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
free_irq_vector(sn_irq_info->irq_irq);
call_rcu(&sn_irq_info->rcu, sn_irq_info_free); call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
pci_dev_put(pci_dev); pci_dev_put(pci_dev);
} }
static inline void static inline void
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/sn/pcibr_provider.h> #include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h> #include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h> #include <asm/sn/sn_sal.h>
...@@ -113,7 +113,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size, ...@@ -113,7 +113,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
* resources. * resources.
*/ */
*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size); *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
SN_DMA_ADDR_PHYS);
if (!*dma_handle) { if (!*dma_handle) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
free_pages((unsigned long)cpuaddr, get_order(size)); free_pages((unsigned long)cpuaddr, get_order(size));
...@@ -176,7 +177,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size, ...@@ -176,7 +177,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
BUG_ON(dev->bus != &pci_bus_type); BUG_ON(dev->bus != &pci_bus_type);
phys_addr = __pa(cpu_addr); phys_addr = __pa(cpu_addr);
dma_addr = provider->dma_map(pdev, phys_addr, size); dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
if (!dma_addr) { if (!dma_addr) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
return 0; return 0;
...@@ -260,7 +261,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries, ...@@ -260,7 +261,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
for (i = 0; i < nhwentries; i++, sg++) { for (i = 0; i < nhwentries; i++, sg++) {
phys_addr = SG_ENT_PHYS_ADDRESS(sg); phys_addr = SG_ENT_PHYS_ADDRESS(sg);
sg->dma_address = provider->dma_map(pdev, sg->dma_address = provider->dma_map(pdev,
phys_addr, sg->length); phys_addr, sg->length,
SN_DMA_ADDR_PHYS);
if (!sg->dma_address) { if (!sg->dma_address) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
......
...@@ -41,7 +41,7 @@ extern int sn_ioif_inited; ...@@ -41,7 +41,7 @@ extern int sn_ioif_inited;
static dma_addr_t static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info, pcibr_dmamap_ate32(struct pcidev_info *info,
u64 paddr, size_t req_size, u64 flags) u64 paddr, size_t req_size, u64 flags, int dma_flags)
{ {
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
...@@ -81,9 +81,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info, ...@@ -81,9 +81,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
if (IS_PCIX(pcibus_info)) if (IS_PCIX(pcibus_info))
ate_flags &= ~(PCI32_ATE_PREF); ate_flags &= ~(PCI32_ATE_PREF);
xio_addr = if (SN_DMA_ADDRTYPE(dma_flags == SN_DMA_ADDR_PHYS))
IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr); PHYS_TO_TIODMA(paddr);
else
xio_addr = paddr;
offset = IOPGOFF(xio_addr); offset = IOPGOFF(xio_addr);
ate = ate_flags | (xio_addr - offset); ate = ate_flags | (xio_addr - offset);
...@@ -91,6 +94,13 @@ pcibr_dmamap_ate32(struct pcidev_info *info, ...@@ -91,6 +94,13 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
if (IS_PIC_SOFT(pcibus_info)) { if (IS_PIC_SOFT(pcibus_info)) {
ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT); ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
} }
/*
* If we're mapping for MSI, set the MSI bit in the ATE
*/
if (dma_flags & SN_DMA_MSI)
ate |= PCI32_ATE_MSI;
ate_write(pcibus_info, ate_index, ate_count, ate); ate_write(pcibus_info, ate_index, ate_count, ate);
/* /*
...@@ -105,20 +115,27 @@ pcibr_dmamap_ate32(struct pcidev_info *info, ...@@ -105,20 +115,27 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR) if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
ATE_SWAP_ON(pci_addr); ATE_SWAP_ON(pci_addr);
return pci_addr; return pci_addr;
} }
static dma_addr_t static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr, pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
u64 dma_attributes) u64 dma_attributes, int dma_flags)
{ {
struct pcibus_info *pcibus_info = (struct pcibus_info *) struct pcibus_info *pcibus_info = (struct pcibus_info *)
((info->pdi_host_pcidev_info)->pdi_pcibus_info); ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
u64 pci_addr; u64 pci_addr;
/* Translate to Crosstalk View of Physical Address */ /* Translate to Crosstalk View of Physical Address */
pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
PHYS_TO_TIODMA(paddr)) | dma_attributes; pci_addr = IS_PIC_SOFT(pcibus_info) ?
PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr) | dma_attributes;
else
pci_addr = IS_PIC_SOFT(pcibus_info) ?
paddr :
paddr | dma_attributes;
/* Handle Bus mode */ /* Handle Bus mode */
if (IS_PCIX(pcibus_info)) if (IS_PCIX(pcibus_info))
...@@ -130,7 +147,9 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr, ...@@ -130,7 +147,9 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
((u64) pcibus_info-> ((u64) pcibus_info->
pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT); pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
} else } else
pci_addr |= TIOCP_PCI64_CMDTYPE_MEM; pci_addr |= (dma_flags & SN_DMA_MSI) ?
TIOCP_PCI64_CMDTYPE_MSI :
TIOCP_PCI64_CMDTYPE_MEM;
/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */ /* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn)) if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
...@@ -141,7 +160,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr, ...@@ -141,7 +160,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
static dma_addr_t static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info * info, pcibr_dmatrans_direct32(struct pcidev_info * info,
u64 paddr, size_t req_size, u64 flags) u64 paddr, size_t req_size, u64 flags, int dma_flags)
{ {
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
...@@ -156,8 +175,14 @@ pcibr_dmatrans_direct32(struct pcidev_info * info, ...@@ -156,8 +175,14 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
return 0; return 0;
} }
if (dma_flags & SN_DMA_MSI)
return 0;
if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr); PHYS_TO_TIODMA(paddr);
else
xio_addr = paddr;
xio_base = pcibus_info->pbi_dir_xbase; xio_base = pcibus_info->pbi_dir_xbase;
offset = xio_addr - xio_base; offset = xio_addr - xio_base;
...@@ -327,7 +352,7 @@ void sn_dma_flush(u64 addr) ...@@ -327,7 +352,7 @@ void sn_dma_flush(u64 addr)
*/ */
dma_addr_t dma_addr_t
pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size) pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
{ {
dma_addr_t dma_handle; dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
...@@ -344,11 +369,11 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size) ...@@ -344,11 +369,11 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
*/ */
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr, dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
PCI64_ATTR_PREF); PCI64_ATTR_PREF, dma_flags);
} else { } else {
/* Handle 32-63 bit cards via direct mapping */ /* Handle 32-63 bit cards via direct mapping */
dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr, dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
size, 0); size, 0, dma_flags);
if (!dma_handle) { if (!dma_handle) {
/* /*
* It is a 32 bit card and we cannot do direct mapping, * It is a 32 bit card and we cannot do direct mapping,
...@@ -356,7 +381,8 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size) ...@@ -356,7 +381,8 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
*/ */
dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr, dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
size, PCI32_ATE_PREF); size, PCI32_ATE_PREF,
dma_flags);
} }
} }
...@@ -365,18 +391,18 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size) ...@@ -365,18 +391,18 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
dma_addr_t dma_addr_t
pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr, pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
size_t size) size_t size, int dma_flags)
{ {
dma_addr_t dma_handle; dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
if (hwdev->dev.coherent_dma_mask == ~0UL) { if (hwdev->dev.coherent_dma_mask == ~0UL) {
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr, dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
PCI64_ATTR_BAR); PCI64_ATTR_BAR, dma_flags);
} else { } else {
dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info, dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
phys_addr, size, phys_addr, size,
PCI32_ATE_BAR); PCI32_ATE_BAR, dma_flags);
} }
return dma_handle; return dma_handle;
......
...@@ -515,10 +515,16 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) ...@@ -515,10 +515,16 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
* use the GART mapped mode. * use the GART mapped mode.
*/ */
static u64 static u64
tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count) tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
{ {
u64 mapaddr; u64 mapaddr;
/*
* Not supported for now ...
*/
if (dma_flags & SN_DMA_MSI)
return 0;
/* /*
* If card is 64 or 48 bit addresable, use a direct mapping. 32 * If card is 64 or 48 bit addresable, use a direct mapping. 32
* bit direct is so restrictive w.r.t. where the memory resides that * bit direct is so restrictive w.r.t. where the memory resides that
......
...@@ -170,7 +170,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr) ...@@ -170,7 +170,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
(ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1) (ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)
#define ATE_VALID(ate) ((ate) & (1UL << 63)) #define ATE_VALID(ate) ((ate) & (1UL << 63))
#define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63)) #define ATE_MAKE(addr, ps, msi) \
(((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0))
/* /*
* Flavors of ate-based mapping supported by tioce_alloc_map() * Flavors of ate-based mapping supported by tioce_alloc_map()
...@@ -196,15 +197,17 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr) ...@@ -196,15 +197,17 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
* *
* 63 - must be 1 to indicate d64 mode to CE hardware * 63 - must be 1 to indicate d64 mode to CE hardware
* 62 - barrier bit ... controlled with tioce_dma_barrier() * 62 - barrier bit ... controlled with tioce_dma_barrier()
* 61 - 0 since this is not an MSI transaction * 61 - msi bit ... specified through dma_flags
* 60:54 - reserved, MBZ * 60:54 - reserved, MBZ
*/ */
static u64 static u64
tioce_dma_d64(unsigned long ct_addr) tioce_dma_d64(unsigned long ct_addr, int dma_flags)
{ {
u64 bus_addr; u64 bus_addr;
bus_addr = ct_addr | (1UL << 63); bus_addr = ct_addr | (1UL << 63);
if (dma_flags & SN_DMA_MSI)
bus_addr |= (1UL << 61);
return bus_addr; return bus_addr;
} }
...@@ -261,7 +264,7 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base, ...@@ -261,7 +264,7 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base,
*/ */
static u64 static u64
tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
u64 ct_addr, int len) u64 ct_addr, int len, int dma_flags)
{ {
int i; int i;
int j; int j;
...@@ -270,6 +273,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, ...@@ -270,6 +273,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
int entries; int entries;
int nates; int nates;
u64 pagesize; u64 pagesize;
int msi_capable, msi_wanted;
u64 *ate_shadow; u64 *ate_shadow;
u64 *ate_reg; u64 *ate_reg;
u64 addr; u64 addr;
...@@ -291,6 +295,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, ...@@ -291,6 +295,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
ate_reg = ce_mmr->ce_ure_ate3240; ate_reg = ce_mmr->ce_ure_ate3240;
pagesize = ce_kern->ce_ate3240_pagesize; pagesize = ce_kern->ce_ate3240_pagesize;
bus_base = TIOCE_M32_MIN; bus_base = TIOCE_M32_MIN;
msi_capable = 1;
break; break;
case TIOCE_ATE_M40: case TIOCE_ATE_M40:
first = 0; first = 0;
...@@ -299,6 +304,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, ...@@ -299,6 +304,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
ate_reg = ce_mmr->ce_ure_ate40; ate_reg = ce_mmr->ce_ure_ate40;
pagesize = MB(64); pagesize = MB(64);
bus_base = TIOCE_M40_MIN; bus_base = TIOCE_M40_MIN;
msi_capable = 0;
break; break;
case TIOCE_ATE_M40S: case TIOCE_ATE_M40S:
/* /*
...@@ -311,11 +317,16 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, ...@@ -311,11 +317,16 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
ate_reg = ce_mmr->ce_ure_ate3240; ate_reg = ce_mmr->ce_ure_ate3240;
pagesize = GB(16); pagesize = GB(16);
bus_base = TIOCE_M40S_MIN; bus_base = TIOCE_M40S_MIN;
msi_capable = 0;
break; break;
default: default:
return 0; return 0;
} }
msi_wanted = dma_flags & SN_DMA_MSI;
if (msi_wanted && !msi_capable)
return 0;
nates = ATE_NPAGES(ct_addr, len, pagesize); nates = ATE_NPAGES(ct_addr, len, pagesize);
if (nates > entries) if (nates > entries)
return 0; return 0;
...@@ -344,7 +355,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, ...@@ -344,7 +355,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
for (j = 0; j < nates; j++) { for (j = 0; j < nates; j++) {
u64 ate; u64 ate;
ate = ATE_MAKE(addr, pagesize); ate = ATE_MAKE(addr, pagesize, msi_wanted);
ate_shadow[i + j] = ate; ate_shadow[i + j] = ate;
tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate); tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate);
addr += pagesize; addr += pagesize;
...@@ -371,7 +382,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, ...@@ -371,7 +382,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
* Map @paddr into 32-bit bus space of the CE associated with @pcidev_info. * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info.
*/ */
static u64 static u64
tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr) tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags)
{ {
int dma_ok; int dma_ok;
int port; int port;
...@@ -381,6 +392,9 @@ tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr) ...@@ -381,6 +392,9 @@ tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
u64 ct_lower; u64 ct_lower;
dma_addr_t bus_addr; dma_addr_t bus_addr;
if (dma_flags & SN_DMA_MSI)
return 0;
ct_upper = ct_addr & ~0x3fffffffUL; ct_upper = ct_addr & ~0x3fffffffUL;
ct_lower = ct_addr & 0x3fffffffUL; ct_lower = ct_addr & 0x3fffffffUL;
...@@ -507,7 +521,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) ...@@ -507,7 +521,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
*/ */
static u64 static u64
tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
int barrier) int barrier, int dma_flags)
{ {
unsigned long flags; unsigned long flags;
u64 ct_addr; u64 ct_addr;
...@@ -523,14 +537,17 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, ...@@ -523,14 +537,17 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
if (dma_mask < 0x7fffffffUL) if (dma_mask < 0x7fffffffUL)
return 0; return 0;
if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
ct_addr = PHYS_TO_TIODMA(paddr); ct_addr = PHYS_TO_TIODMA(paddr);
else
ct_addr = paddr;
/* /*
* If the device can generate 64 bit addresses, create a D64 map. * If the device can generate 64 bit addresses, create a D64 map.
* Since this should never fail, bypass the rest of the checks.
*/ */
if (dma_mask == ~0UL) { if (dma_mask == ~0UL) {
mapaddr = tioce_dma_d64(ct_addr); mapaddr = tioce_dma_d64(ct_addr, dma_flags);
if (mapaddr)
goto dma_map_done; goto dma_map_done;
} }
...@@ -574,18 +591,22 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, ...@@ -574,18 +591,22 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
if (byte_count > MB(64)) { if (byte_count > MB(64)) {
mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
port, ct_addr, byte_count); port, ct_addr, byte_count,
dma_flags);
if (!mapaddr) if (!mapaddr)
mapaddr = mapaddr =
tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
ct_addr, byte_count); ct_addr, byte_count,
dma_flags);
} else { } else {
mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
ct_addr, byte_count); ct_addr, byte_count,
dma_flags);
if (!mapaddr) if (!mapaddr)
mapaddr = mapaddr =
tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
port, ct_addr, byte_count); port, ct_addr, byte_count,
dma_flags);
} }
} }
...@@ -593,7 +614,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, ...@@ -593,7 +614,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
* 32-bit direct is the next mode to try * 32-bit direct is the next mode to try
*/ */
if (!mapaddr && dma_mask >= 0xffffffffUL) if (!mapaddr && dma_mask >= 0xffffffffUL)
mapaddr = tioce_dma_d32(pdev, ct_addr); mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);
/* /*
* Last resort, try 32-bit ATE-based map. * Last resort, try 32-bit ATE-based map.
...@@ -601,7 +622,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, ...@@ -601,7 +622,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
if (!mapaddr) if (!mapaddr)
mapaddr = mapaddr =
tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr, tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
byte_count); byte_count, dma_flags);
spin_unlock_irqrestore(&ce_kern->ce_lock, flags); spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
...@@ -622,9 +643,9 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, ...@@ -622,9 +643,9 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
* in the address. * in the address.
*/ */
static u64 static u64
tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count) tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
{ {
return tioce_do_dma_map(pdev, paddr, byte_count, 0); return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
} }
/** /**
...@@ -636,9 +657,9 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count) ...@@ -636,9 +657,9 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
* Simply call tioce_do_dma_map() to create a map with the barrier bit set * Simply call tioce_do_dma_map() to create a map with the barrier bit set
* in the address. * in the address.
*/ static u64 */ static u64
tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count) tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
{ {
return tioce_do_dma_map(pdev, paddr, byte_count, 1); return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
} }
/** /**
...@@ -696,7 +717,7 @@ tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit) ...@@ -696,7 +717,7 @@ tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
while (ate_index <= last_ate) { while (ate_index <= last_ate) {
u64 ate; u64 ate;
ate = ATE_MAKE(0xdeadbeef, ps); ate = ATE_MAKE(0xdeadbeef, ps, 0);
ce_kern->ce_ate3240_shadow[ate_index] = ate; ce_kern->ce_ate3240_shadow[ate_index] = ate;
tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index], tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
ate); ate);
......
...@@ -379,13 +379,12 @@ mpc85xx_cds_pcibios_fixup(void) ...@@ -379,13 +379,12 @@ mpc85xx_cds_pcibios_fixup(void)
PCI_DEVICE_ID_VIA_82C586_2, NULL))) { PCI_DEVICE_ID_VIA_82C586_2, NULL))) {
dev->irq = 10; dev->irq = 10;
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 10); pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 10);
pci_dev_put(dev);
}
if ((dev = pci_get_device(PCI_VENDOR_ID_VIA, if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_82C586_2, dev))) { PCI_DEVICE_ID_VIA_82C586_2, dev))) {
dev->irq = 11; dev->irq = 11;
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11); pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
}
pci_dev_put(dev); pci_dev_put(dev);
} }
} }
......
...@@ -13,7 +13,10 @@ ...@@ -13,7 +13,10 @@
#include "pci.h" #include "pci.h"
#define MMCONFIG_APER_SIZE (256*1024*1024) /* aperture is up to 256MB but BIOS may reserve less */
#define MMCONFIG_APER_MIN (2 * 1024*1024)
#define MMCONFIG_APER_MAX (256 * 1024*1024)
/* Verify the first 16 busses. We assume that systems with more busses /* Verify the first 16 busses. We assume that systems with more busses
get MCFG right. */ get MCFG right. */
#define MAX_CHECK_BUS 16 #define MAX_CHECK_BUS 16
...@@ -175,9 +178,10 @@ void __init pci_mmcfg_init(void) ...@@ -175,9 +178,10 @@ void __init pci_mmcfg_init(void)
return; return;
if (!e820_all_mapped(pci_mmcfg_config[0].base_address, if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
pci_mmcfg_config[0].base_address + MMCONFIG_APER_SIZE, pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
E820_RESERVED)) { E820_RESERVED)) {
printk(KERN_ERR "PCI: BIOS Bug: MCFG area is not E820-reserved\n"); printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
pci_mmcfg_config[0].base_address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return; return;
} }
...@@ -190,7 +194,8 @@ void __init pci_mmcfg_init(void) ...@@ -190,7 +194,8 @@ void __init pci_mmcfg_init(void)
} }
for (i = 0; i < pci_mmcfg_config_num; ++i) { for (i = 0; i < pci_mmcfg_config_num; ++i) {
pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i]; pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address, MMCONFIG_APER_SIZE); pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
MMCONFIG_APER_MAX);
if (!pci_mmcfg_virt[i].virt) { if (!pci_mmcfg_virt[i].virt) {
printk("PCI: Cannot map mmconfig aperture for segment %d\n", printk("PCI: Cannot map mmconfig aperture for segment %d\n",
pci_mmcfg_config[i].pci_segment_group_number); pci_mmcfg_config[i].pci_segment_group_number);
......
...@@ -26,7 +26,11 @@ obj-$(CONFIG_PPC32) += setup-irq.o ...@@ -26,7 +26,11 @@ obj-$(CONFIG_PPC32) += setup-irq.o
obj-$(CONFIG_PPC64) += setup-bus.o obj-$(CONFIG_PPC64) += setup-bus.o
obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
obj-$(CONFIG_X86_VISWS) += setup-irq.o obj-$(CONFIG_X86_VISWS) += setup-irq.o
obj-$(CONFIG_PCI_MSI) += msi.o
msiobj-y := msi.o msi-apic.o
msiobj-$(CONFIG_IA64_GENERIC) += msi-altix.o
msiobj-$(CONFIG_IA64_SGI_SN2) += msi-altix.o
obj-$(CONFIG_PCI_MSI) += $(msiobj-y)
# #
# ACPI Related PCI FW Functions # ACPI Related PCI FW Functions
......
...@@ -81,9 +81,9 @@ void __devinit pci_bus_add_device(struct pci_dev *dev) ...@@ -81,9 +81,9 @@ void __devinit pci_bus_add_device(struct pci_dev *dev)
{ {
device_add(&dev->dev); device_add(&dev->dev);
spin_lock(&pci_bus_lock); down_write(&pci_bus_sem);
list_add_tail(&dev->global_list, &pci_devices); list_add_tail(&dev->global_list, &pci_devices);
spin_unlock(&pci_bus_lock); up_write(&pci_bus_sem);
pci_proc_attach_device(dev); pci_proc_attach_device(dev);
pci_create_sysfs_dev_files(dev); pci_create_sysfs_dev_files(dev);
...@@ -125,10 +125,10 @@ void __devinit pci_bus_add_devices(struct pci_bus *bus) ...@@ -125,10 +125,10 @@ void __devinit pci_bus_add_devices(struct pci_bus *bus)
*/ */
if (dev->subordinate) { if (dev->subordinate) {
if (list_empty(&dev->subordinate->node)) { if (list_empty(&dev->subordinate->node)) {
spin_lock(&pci_bus_lock); down_write(&pci_bus_sem);
list_add_tail(&dev->subordinate->node, list_add_tail(&dev->subordinate->node,
&dev->bus->children); &dev->bus->children);
spin_unlock(&pci_bus_lock); up_write(&pci_bus_sem);
} }
pci_bus_add_devices(dev->subordinate); pci_bus_add_devices(dev->subordinate);
...@@ -168,7 +168,7 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), ...@@ -168,7 +168,7 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
struct list_head *next; struct list_head *next;
bus = top; bus = top;
spin_lock(&pci_bus_lock); down_read(&pci_bus_sem);
next = top->devices.next; next = top->devices.next;
for (;;) { for (;;) {
if (next == &bus->devices) { if (next == &bus->devices) {
...@@ -180,22 +180,19 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), ...@@ -180,22 +180,19 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
continue; continue;
} }
dev = list_entry(next, struct pci_dev, bus_list); dev = list_entry(next, struct pci_dev, bus_list);
pci_dev_get(dev);
if (dev->subordinate) { if (dev->subordinate) {
/* this is a pci-pci bridge, do its devices next */ /* this is a pci-pci bridge, do its devices next */
next = dev->subordinate->devices.next; next = dev->subordinate->devices.next;
bus = dev->subordinate; bus = dev->subordinate;
} else } else
next = dev->bus_list.next; next = dev->bus_list.next;
spin_unlock(&pci_bus_lock);
/* Run device routines with the bus unlocked */ /* Run device routines with the device locked */
down(&dev->dev.sem);
cb(dev, userdata); cb(dev, userdata);
up(&dev->dev.sem);
spin_lock(&pci_bus_lock);
pci_dev_put(dev);
} }
spin_unlock(&pci_bus_lock); up_read(&pci_bus_sem);
} }
EXPORT_SYMBOL_GPL(pci_walk_bus); EXPORT_SYMBOL_GPL(pci_walk_bus);
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <asm/sn/addrs.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/nodepda.h>
#include "msi.h"
/*
 * Per-vector MSI bookkeeping for the SN platform: the bus address that
 * was programmed into the device's MSI address registers, and the SN
 * interrupt descriptor allocated (via the prom) for that vector.
 */
struct sn_msi_info {
	u64 pci_addr;				/* bus addr from dma_map_consistent */
	struct sn_irq_info *sn_irq_info;	/* NULL while the vector is unused */
};

/* Indexed by vector number; allocated once in sn_msi_init(). */
static struct sn_msi_info *sn_msi_info;
/*
 * sn_msi_teardown - release the platform resources backing MSI @vector.
 *
 * Undoes sn_msi_setup(): unmaps the bus address that was handed to the
 * device, frees the prom-allocated interrupt resources, and clears the
 * per-vector bookkeeping.  Vectors whose irq_int_bit is >= 0 are normal
 * (non-MSI) irqs and are left untouched.
 */
static void
sn_msi_teardown(unsigned int vector)
{
	nasid_t nasid;
	int widget;
	struct pci_dev *pdev;
	struct pcidev_info *sn_pdev;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft;
	struct sn_pcibus_provider *provider;

	sn_irq_info = sn_msi_info[vector].sn_irq_info;
	/* irq_int_bit == -1 marks an MSI irq (set in sn_msi_setup()) */
	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
		return;

	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	pdev = sn_pdev->pdi_linux_pcidev;
	provider = SN_PCIDEV_BUSPROVIDER(pdev);

	/* Tear down the bus-space mapping of the MSI target address. */
	(*provider->dma_unmap)(pdev,
			       sn_msi_info[vector].pci_addr,
			       PCI_DMA_FROMDEVICE);
	sn_msi_info[vector].pci_addr = 0;

	bussoft = SN_PCIDEV_BUSSOFT(pdev);
	nasid = NASID_GET(bussoft->bs_base);
	/* NOTE(review): odd nasid appears to select a TIO widget -- confirm */
	widget = (nasid & 1) ?
		TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
		SWIN_WIDGETNUM(bussoft->bs_base);

	sn_intr_free(nasid, widget, sn_irq_info);
	sn_msi_info[vector].sn_irq_info = NULL;

	return;
}
/*
 * sn_msi_setup - allocate SN interrupt resources and compute the MSI
 * address/data values for @vector on @pdev.
 *
 * On success fills in @addr_hi/@addr_lo (the bus address the device must
 * write to) and @data (the message payload) and records the mapping in
 * sn_msi_info[].  Returns 0 on success, -EINVAL if the bus provider
 * cannot support MSI, -ENOMEM on allocation or mapping failure.
 */
int
sn_msi_setup(struct pci_dev *pdev, unsigned int vector,
	     u32 *addr_hi, u32 *addr_lo, u32 *data)
{
	int widget;
	int status;
	nasid_t nasid;
	u64 bus_addr;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	if (bussoft == NULL)
		return -EINVAL;

	/* MSI requires a provider with a consistent-map operation. */
	if (provider == NULL || provider->dma_map_consistent == NULL)
		return -EINVAL;

	/*
	 * Set up the vector plumbing.  Let the prom (via sn_intr_alloc)
	 * decide which cpu to direct this msi at by default.
	 */

	nasid = NASID_GET(bussoft->bs_base);
	/* NOTE(review): odd nasid appears to select a TIO widget -- confirm */
	widget = (nasid & 1) ?
		TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
		SWIN_WIDGETNUM(bussoft->bs_base);

	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (! sn_irq_info)
		return -ENOMEM;

	status = sn_intr_alloc(nasid, widget, sn_irq_info, vector, -1, -1);
	if (status) {
		kfree(sn_irq_info);
		return -ENOMEM;
	}

	sn_irq_info->irq_int_bit = -1;	/* mark this as an MSI irq */
	sn_irq_fixup(pdev, sn_irq_info);

	/* Prom probably should fill these in, but doesn't ... */
	sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
	sn_irq_info->irq_bridge = (void *)bussoft->bs_base;

	/*
	 * Map the xio address into bus space
	 */
	bus_addr = (*provider->dma_map_consistent)(pdev,
					sn_irq_info->irq_xtalkaddr,
					sizeof(sn_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);
	if (! bus_addr) {
		/* Unwind the interrupt allocation on mapping failure. */
		sn_intr_free(nasid, widget, sn_irq_info);
		kfree(sn_irq_info);
		return -ENOMEM;
	}

	sn_msi_info[vector].sn_irq_info = sn_irq_info;
	sn_msi_info[vector].pci_addr = bus_addr;

	*addr_hi = (u32)(bus_addr >> 32);
	*addr_lo = (u32)(bus_addr & 0x00000000ffffffff);

	/*
	 * In the SN platform, bit 16 is a "send vector" bit which
	 * must be present in order to move the vector through the system.
	 */
	*data = 0x100 + (unsigned int)vector;

#ifdef CONFIG_SMP
	set_irq_affinity_info((vector & 0xff), sn_irq_info->irq_cpuid, 0);
#endif

	return 0;
}
/*
 * sn_msi_target - retarget MSI @vector at @cpu.
 *
 * Unmaps the old bus address (reconstructed from the in/out
 * @addr_hi/@addr_lo pair), asks the prom to move the vector to the
 * nasid/slice of @cpu, then maps a new bus address and writes it back
 * through @addr_hi/@addr_lo.  Non-MSI vectors are ignored.
 */
static void
sn_msi_target(unsigned int vector, unsigned int cpu,
	      u32 *addr_hi, u32 *addr_lo)
{
	int slice;
	nasid_t nasid;
	u64 bus_addr;
	struct pci_dev *pdev;
	struct pcidev_info *sn_pdev;
	struct sn_irq_info *sn_irq_info;
	struct sn_irq_info *new_irq_info;
	struct sn_pcibus_provider *provider;

	sn_irq_info = sn_msi_info[vector].sn_irq_info;
	/* irq_int_bit == -1 marks an MSI irq (set in sn_msi_setup()) */
	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
		return;

	/*
	 * Release XIO resources for the old MSI PCI address
	 */
	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	pdev = sn_pdev->pdi_linux_pcidev;
	provider = SN_PCIDEV_BUSPROVIDER(pdev);

	bus_addr = (u64)(*addr_hi) << 32 | (u64)(*addr_lo);
	(*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
	sn_msi_info[vector].pci_addr = 0;

	nasid = cpuid_to_nasid(cpu);
	slice = cpuid_to_slice(cpu);
	new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
	sn_msi_info[vector].sn_irq_info = new_irq_info;
	if (new_irq_info == NULL)
		return;

	/*
	 * Map the xio address into bus space
	 */
	/* NOTE(review): a failed remap (bus_addr == 0) is stored and written
	 * back to the caller unchecked -- confirm whether that is tolerable. */
	bus_addr = (*provider->dma_map_consistent)(pdev,
					new_irq_info->irq_xtalkaddr,
					sizeof(new_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);
	sn_msi_info[vector].pci_addr = bus_addr;
	*addr_hi = (u32)(bus_addr >> 32);
	*addr_lo = (u32)(bus_addr & 0x00000000ffffffff);
}
/* SN platform MSI operations, registered with the core in sn_msi_init(). */
struct msi_ops sn_msi_ops = {
	.setup = sn_msi_setup,
	.teardown = sn_msi_teardown,
#ifdef CONFIG_SMP
	.target = sn_msi_target,	/* affinity retargeting, SMP only */
#endif
};
int
sn_msi_init(void)
{
sn_msi_info =
kzalloc(sizeof(struct sn_msi_info) * NR_VECTORS, GFP_KERNEL);
if (! sn_msi_info)
return -ENOMEM;
msi_register(&sn_msi_ops);
return 0;
}
/*
* MSI hooks for standard x86 apic
*/
#include <linux/pci.h>
#include <linux/irq.h>
#include "msi.h"
/*
 * Shifts for APIC-based data
 */

#define MSI_DATA_VECTOR_SHIFT		0
#define MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)

#define MSI_DATA_DELIVERY_SHIFT		8
#define MSI_DATA_DELIVERY_FIXED		(0 << MSI_DATA_DELIVERY_SHIFT)
#define MSI_DATA_DELIVERY_LOWPRI	(1 << MSI_DATA_DELIVERY_SHIFT)

#define MSI_DATA_LEVEL_SHIFT		14
#define MSI_DATA_LEVEL_DEASSERT		(0 << MSI_DATA_LEVEL_SHIFT)
#define MSI_DATA_LEVEL_ASSERT		(1 << MSI_DATA_LEVEL_SHIFT)

#define MSI_DATA_TRIGGER_SHIFT		15
#define MSI_DATA_TRIGGER_EDGE		(0 << MSI_DATA_TRIGGER_SHIFT)
#define MSI_DATA_TRIGGER_LEVEL		(1 << MSI_DATA_TRIGGER_SHIFT)

/*
 * Shift/mask fields for APIC-based bus address
 */

#define MSI_ADDR_HEADER			0xfee00000

/* Mask that KEEPS everything except the destination-id field. */
#define MSI_ADDR_DESTID_MASK		0xfff0000f
#define MSI_ADDR_DESTID_CPU(cpu)	((cpu) << MSI_TARGET_CPU_SHIFT)

#define MSI_ADDR_DESTMODE_SHIFT		2
#define MSI_ADDR_DESTMODE_PHYS		(0 << MSI_ADDR_DESTMODE_SHIFT)
#define MSI_ADDR_DESTMODE_LOGIC		(1 << MSI_ADDR_DESTMODE_SHIFT)

#define MSI_ADDR_REDIRECTION_SHIFT	3
#define MSI_ADDR_REDIRECTION_CPU	(0 << MSI_ADDR_REDIRECTION_SHIFT)
#define MSI_ADDR_REDIRECTION_LOWPRI	(1 << MSI_ADDR_REDIRECTION_SHIFT)
/*
 * msi_target_apic - rewrite the destination-id field of an MSI address
 * so the message is delivered to @dest_cpu.  Only the low address word
 * carries the destination; @address_hi is left untouched.
 */
static void
msi_target_apic(unsigned int vector,
		unsigned int dest_cpu,
		u32 *address_hi,	/* in/out */
		u32 *address_lo)	/* in/out */
{
	/* Keep every field but the destination id, then insert dest_cpu's
	 * physical APIC id. */
	*address_lo = (*address_lo & MSI_ADDR_DESTID_MASK) |
		      MSI_ADDR_DESTID_CPU(cpu_physical_id(dest_cpu));
}
/*
 * msi_setup_apic - compute the initial MSI address/data pair for @vector.
 *
 * The message is a fixed-delivery, edge-triggered interrupt aimed at the
 * first online cpu; affinity changes later go through msi_target_apic().
 * Always returns 0.
 */
static int
msi_setup_apic(struct pci_dev *pdev,	/* unused in generic */
	       unsigned int vector,
	       u32 *address_hi,
	       u32 *address_lo,
	       u32 *data)
{
	/* Default target: physical APIC id of the first online cpu. */
	unsigned long target_apic_id = cpu_physical_id(first_cpu(cpu_online_map));

	*address_hi = 0;
	*address_lo = MSI_ADDR_HEADER
		    | MSI_ADDR_DESTMODE_PHYS
		    | MSI_ADDR_REDIRECTION_CPU
		    | MSI_ADDR_DESTID_CPU(target_apic_id);

	*data = MSI_DATA_TRIGGER_EDGE
	      | MSI_DATA_LEVEL_ASSERT
	      | MSI_DATA_DELIVERY_FIXED
	      | MSI_DATA_VECTOR(vector);

	return 0;
}
/*
 * msi_teardown_apic - teardown hook for the generic APIC flavour.
 * Nothing was allocated in msi_setup_apic(), so there is nothing to
 * release; this exists only to satisfy the msi_ops interface.
 */
static void
msi_teardown_apic(unsigned int vector)
{
	/* intentionally empty */
}
/*
 * Generic ops used on most IA archs/platforms.  Set with msi_register()
 * (platforms such as SN2 register their own ops instead).
 */
struct msi_ops msi_apic_ops = {
	.setup = msi_setup_apic,
	.teardown = msi_teardown_apic,
	.target = msi_target_apic,
};
...@@ -23,8 +23,6 @@ ...@@ -23,8 +23,6 @@
#include "pci.h" #include "pci.h"
#include "msi.h" #include "msi.h"
#define MSI_TARGET_CPU first_cpu(cpu_online_map)
static DEFINE_SPINLOCK(msi_lock); static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL }; static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep; static kmem_cache_t* msi_cachep;
...@@ -37,9 +35,17 @@ static int nr_msix_devices; ...@@ -37,9 +35,17 @@ static int nr_msix_devices;
#ifndef CONFIG_X86_IO_APIC #ifndef CONFIG_X86_IO_APIC
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1}; int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
#endif #endif
static struct msi_ops *msi_ops;
int
msi_register(struct msi_ops *ops)
{
msi_ops = ops;
return 0;
}
static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags) static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags)
{ {
memset(p, 0, NR_IRQS * sizeof(struct msi_desc)); memset(p, 0, NR_IRQS * sizeof(struct msi_desc));
...@@ -92,7 +98,7 @@ static void msi_set_mask_bit(unsigned int vector, int flag) ...@@ -92,7 +98,7 @@ static void msi_set_mask_bit(unsigned int vector, int flag)
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask) static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{ {
struct msi_desc *entry; struct msi_desc *entry;
struct msg_address address; u32 address_hi, address_lo;
unsigned int irq = vector; unsigned int irq = vector;
unsigned int dest_cpu = first_cpu(cpu_mask); unsigned int dest_cpu = first_cpu(cpu_mask);
...@@ -108,28 +114,36 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask) ...@@ -108,28 +114,36 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
if (!pos) if (!pos)
return; return;
pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
&address_hi);
pci_read_config_dword(entry->dev, msi_lower_address_reg(pos), pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
&address.lo_address.value); &address_lo);
address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
address.lo_address.value |= (cpu_physical_id(dest_cpu) << msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);
MSI_TARGET_CPU_SHIFT);
entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu); pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
address_hi);
pci_write_config_dword(entry->dev, msi_lower_address_reg(pos), pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
address.lo_address.value); address_lo);
set_native_irq_info(irq, cpu_mask); set_native_irq_info(irq, cpu_mask);
break; break;
} }
case PCI_CAP_ID_MSIX: case PCI_CAP_ID_MSIX:
{ {
int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + int offset_hi =
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
int offset_lo =
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET; PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;
address.lo_address.value = readl(entry->mask_base + offset); address_hi = readl(entry->mask_base + offset_hi);
address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK; address_lo = readl(entry->mask_base + offset_lo);
address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
MSI_TARGET_CPU_SHIFT); msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);
entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
writel(address.lo_address.value, entry->mask_base + offset); writel(address_hi, entry->mask_base + offset_hi);
writel(address_lo, entry->mask_base + offset_lo);
set_native_irq_info(irq, cpu_mask); set_native_irq_info(irq, cpu_mask);
break; break;
} }
...@@ -251,30 +265,6 @@ static struct hw_interrupt_type msi_irq_wo_maskbit_type = { ...@@ -251,30 +265,6 @@ static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
.set_affinity = set_msi_affinity .set_affinity = set_msi_affinity
}; };
static void msi_data_init(struct msg_data *msi_data,
unsigned int vector)
{
memset(msi_data, 0, sizeof(struct msg_data));
msi_data->vector = (u8)vector;
msi_data->delivery_mode = MSI_DELIVERY_MODE;
msi_data->level = MSI_LEVEL_MODE;
msi_data->trigger = MSI_TRIGGER_MODE;
}
static void msi_address_init(struct msg_address *msi_address)
{
unsigned int dest_id;
unsigned long dest_phys_id = cpu_physical_id(MSI_TARGET_CPU);
memset(msi_address, 0, sizeof(struct msg_address));
msi_address->hi_address = (u32)0;
dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT);
msi_address->lo_address.u.dest_mode = MSI_PHYSICAL_MODE;
msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE;
msi_address->lo_address.u.dest_id = dest_id;
msi_address->lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT);
}
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign); static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
static int assign_msi_vector(void) static int assign_msi_vector(void)
{ {
...@@ -369,13 +359,29 @@ static int msi_init(void) ...@@ -369,13 +359,29 @@ static int msi_init(void)
return status; return status;
} }
status = msi_arch_init();
if (status < 0) {
pci_msi_enable = 0;
printk(KERN_WARNING
"PCI: MSI arch init failed. MSI disabled.\n");
return status;
}
if (! msi_ops) {
printk(KERN_WARNING
"PCI: MSI ops not registered. MSI disabled.\n");
status = -EINVAL;
return status;
}
last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
status = msi_cache_init(); status = msi_cache_init();
if (status < 0) { if (status < 0) {
pci_msi_enable = 0; pci_msi_enable = 0;
printk(KERN_WARNING "PCI: MSI cache init failed\n"); printk(KERN_WARNING "PCI: MSI cache init failed\n");
return status; return status;
} }
last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
if (last_alloc_vector < 0) { if (last_alloc_vector < 0) {
pci_msi_enable = 0; pci_msi_enable = 0;
printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n"); printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
...@@ -442,9 +448,11 @@ static void enable_msi_mode(struct pci_dev *dev, int pos, int type) ...@@ -442,9 +448,11 @@ static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
/* Set enabled bits to single MSI & enable MSI_enable bit */ /* Set enabled bits to single MSI & enable MSI_enable bit */
msi_enable(control, 1); msi_enable(control, 1);
pci_write_config_word(dev, msi_control_reg(pos), control); pci_write_config_word(dev, msi_control_reg(pos), control);
dev->msi_enabled = 1;
} else { } else {
msix_enable(control); msix_enable(control);
pci_write_config_word(dev, msi_control_reg(pos), control); pci_write_config_word(dev, msi_control_reg(pos), control);
dev->msix_enabled = 1;
} }
if (pci_find_capability(dev, PCI_CAP_ID_EXP)) { if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
/* PCI Express Endpoint device detected */ /* PCI Express Endpoint device detected */
...@@ -461,9 +469,11 @@ void disable_msi_mode(struct pci_dev *dev, int pos, int type) ...@@ -461,9 +469,11 @@ void disable_msi_mode(struct pci_dev *dev, int pos, int type)
/* Set enabled bits to single MSI & enable MSI_enable bit */ /* Set enabled bits to single MSI & enable MSI_enable bit */
msi_disable(control); msi_disable(control);
pci_write_config_word(dev, msi_control_reg(pos), control); pci_write_config_word(dev, msi_control_reg(pos), control);
dev->msi_enabled = 0;
} else { } else {
msix_disable(control); msix_disable(control);
pci_write_config_word(dev, msi_control_reg(pos), control); pci_write_config_word(dev, msi_control_reg(pos), control);
dev->msix_enabled = 0;
} }
if (pci_find_capability(dev, PCI_CAP_ID_EXP)) { if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
/* PCI Express Endpoint device detected */ /* PCI Express Endpoint device detected */
...@@ -538,7 +548,6 @@ int pci_save_msi_state(struct pci_dev *dev) ...@@ -538,7 +548,6 @@ int pci_save_msi_state(struct pci_dev *dev)
pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]); pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
if (control & PCI_MSI_FLAGS_MASKBIT) if (control & PCI_MSI_FLAGS_MASKBIT)
pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]); pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
save_state->cap_nr = PCI_CAP_ID_MSI; save_state->cap_nr = PCI_CAP_ID_MSI;
pci_add_saved_cap(dev, save_state); pci_add_saved_cap(dev, save_state);
return 0; return 0;
...@@ -575,6 +584,8 @@ void pci_restore_msi_state(struct pci_dev *dev) ...@@ -575,6 +584,8 @@ void pci_restore_msi_state(struct pci_dev *dev)
int pci_save_msix_state(struct pci_dev *dev) int pci_save_msix_state(struct pci_dev *dev)
{ {
int pos; int pos;
int temp;
int vector, head, tail = 0;
u16 control; u16 control;
struct pci_cap_saved_state *save_state; struct pci_cap_saved_state *save_state;
...@@ -582,6 +593,7 @@ int pci_save_msix_state(struct pci_dev *dev) ...@@ -582,6 +593,7 @@ int pci_save_msix_state(struct pci_dev *dev)
if (pos <= 0 || dev->no_msi) if (pos <= 0 || dev->no_msi)
return 0; return 0;
/* save the capability */
pci_read_config_word(dev, msi_control_reg(pos), &control); pci_read_config_word(dev, msi_control_reg(pos), &control);
if (!(control & PCI_MSIX_FLAGS_ENABLE)) if (!(control & PCI_MSIX_FLAGS_ENABLE))
return 0; return 0;
...@@ -593,7 +605,38 @@ int pci_save_msix_state(struct pci_dev *dev) ...@@ -593,7 +605,38 @@ int pci_save_msix_state(struct pci_dev *dev)
} }
*((u16 *)&save_state->data[0]) = control; *((u16 *)&save_state->data[0]) = control;
disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); /* save the table */
temp = dev->irq;
if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
kfree(save_state);
return -EINVAL;
}
vector = head = dev->irq;
while (head != tail) {
int j;
void __iomem *base;
struct msi_desc *entry;
entry = msi_desc[vector];
base = entry->mask_base;
j = entry->msi_attrib.entry_nr;
entry->address_lo_save =
readl(base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
entry->address_hi_save =
readl(base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
entry->data_save =
readl(base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_DATA_OFFSET);
tail = msi_desc[vector]->link.tail;
vector = tail;
}
dev->irq = temp;
save_state->cap_nr = PCI_CAP_ID_MSIX; save_state->cap_nr = PCI_CAP_ID_MSIX;
pci_add_saved_cap(dev, save_state); pci_add_saved_cap(dev, save_state);
return 0; return 0;
...@@ -606,8 +649,6 @@ void pci_restore_msix_state(struct pci_dev *dev) ...@@ -606,8 +649,6 @@ void pci_restore_msix_state(struct pci_dev *dev)
int vector, head, tail = 0; int vector, head, tail = 0;
void __iomem *base; void __iomem *base;
int j; int j;
struct msg_address address;
struct msg_data data;
struct msi_desc *entry; struct msi_desc *entry;
int temp; int temp;
struct pci_cap_saved_state *save_state; struct pci_cap_saved_state *save_state;
...@@ -633,20 +674,13 @@ void pci_restore_msix_state(struct pci_dev *dev) ...@@ -633,20 +674,13 @@ void pci_restore_msix_state(struct pci_dev *dev)
base = entry->mask_base; base = entry->mask_base;
j = entry->msi_attrib.entry_nr; j = entry->msi_attrib.entry_nr;
msi_address_init(&address); writel(entry->address_lo_save,
msi_data_init(&data, vector);
address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
address.lo_address.value |= entry->msi_attrib.current_cpu <<
MSI_TARGET_CPU_SHIFT;
writel(address.lo_address.value,
base + j * PCI_MSIX_ENTRY_SIZE + base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
writel(address.hi_address, writel(entry->address_hi_save,
base + j * PCI_MSIX_ENTRY_SIZE + base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
writel(*(u32*)&data, writel(entry->data_save,
base + j * PCI_MSIX_ENTRY_SIZE + base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_DATA_OFFSET); PCI_MSIX_ENTRY_DATA_OFFSET);
...@@ -660,30 +694,32 @@ void pci_restore_msix_state(struct pci_dev *dev) ...@@ -660,30 +694,32 @@ void pci_restore_msix_state(struct pci_dev *dev)
} }
#endif #endif
static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry) static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
{ {
struct msg_address address; int status;
struct msg_data data; u32 address_hi;
u32 address_lo;
u32 data;
int pos, vector = dev->irq; int pos, vector = dev->irq;
u16 control; u16 control;
pos = pci_find_capability(dev, PCI_CAP_ID_MSI); pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
pci_read_config_word(dev, msi_control_reg(pos), &control); pci_read_config_word(dev, msi_control_reg(pos), &control);
/* Configure MSI capability structure */ /* Configure MSI capability structure */
msi_address_init(&address); status = msi_ops->setup(dev, vector, &address_hi, &address_lo, &data);
msi_data_init(&data, vector); if (status < 0)
entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >> return status;
MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
pci_write_config_dword(dev, msi_lower_address_reg(pos), pci_write_config_dword(dev, msi_lower_address_reg(pos), address_lo);
address.lo_address.value);
if (is_64bit_address(control)) { if (is_64bit_address(control)) {
pci_write_config_dword(dev, pci_write_config_dword(dev,
msi_upper_address_reg(pos), address.hi_address); msi_upper_address_reg(pos), address_hi);
pci_write_config_word(dev, pci_write_config_word(dev,
msi_data_reg(pos, 1), *((u32*)&data)); msi_data_reg(pos, 1), data);
} else } else
pci_write_config_word(dev, pci_write_config_word(dev,
msi_data_reg(pos, 0), *((u32*)&data)); msi_data_reg(pos, 0), data);
if (entry->msi_attrib.maskbit) { if (entry->msi_attrib.maskbit) {
unsigned int maskbits, temp; unsigned int maskbits, temp;
/* All MSIs are unmasked by default, Mask them all */ /* All MSIs are unmasked by default, Mask them all */
...@@ -697,6 +733,8 @@ static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry) ...@@ -697,6 +733,8 @@ static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
msi_mask_bits_reg(pos, is_64bit_address(control)), msi_mask_bits_reg(pos, is_64bit_address(control)),
maskbits); maskbits);
} }
return 0;
} }
/** /**
...@@ -710,6 +748,7 @@ static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry) ...@@ -710,6 +748,7 @@ static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
**/ **/
static int msi_capability_init(struct pci_dev *dev) static int msi_capability_init(struct pci_dev *dev)
{ {
int status;
struct msi_desc *entry; struct msi_desc *entry;
int pos, vector; int pos, vector;
u16 control; u16 control;
...@@ -742,7 +781,12 @@ static int msi_capability_init(struct pci_dev *dev) ...@@ -742,7 +781,12 @@ static int msi_capability_init(struct pci_dev *dev)
/* Replace with MSI handler */ /* Replace with MSI handler */
irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit); irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
/* Configure MSI capability structure */ /* Configure MSI capability structure */
msi_register_init(dev, entry); status = msi_register_init(dev, entry);
if (status != 0) {
dev->irq = entry->msi_attrib.default_vector;
kmem_cache_free(msi_cachep, entry);
return status;
}
attach_msi_entry(entry, vector); attach_msi_entry(entry, vector);
/* Set MSI enabled bits */ /* Set MSI enabled bits */
...@@ -765,8 +809,10 @@ static int msix_capability_init(struct pci_dev *dev, ...@@ -765,8 +809,10 @@ static int msix_capability_init(struct pci_dev *dev,
struct msix_entry *entries, int nvec) struct msix_entry *entries, int nvec)
{ {
struct msi_desc *head = NULL, *tail = NULL, *entry = NULL; struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
struct msg_address address; u32 address_hi;
struct msg_data data; u32 address_lo;
u32 data;
int status;
int vector, pos, i, j, nr_entries, temp = 0; int vector, pos, i, j, nr_entries, temp = 0;
unsigned long phys_addr; unsigned long phys_addr;
u32 table_offset; u32 table_offset;
...@@ -822,18 +868,20 @@ static int msix_capability_init(struct pci_dev *dev, ...@@ -822,18 +868,20 @@ static int msix_capability_init(struct pci_dev *dev,
/* Replace with MSI-X handler */ /* Replace with MSI-X handler */
irq_handler_init(PCI_CAP_ID_MSIX, vector, 1); irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
/* Configure MSI-X capability structure */ /* Configure MSI-X capability structure */
msi_address_init(&address); status = msi_ops->setup(dev, vector,
msi_data_init(&data, vector); &address_hi,
entry->msi_attrib.current_cpu = &address_lo,
((address.lo_address.u.dest_id >> &data);
MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK); if (status < 0)
writel(address.lo_address.value, break;
writel(address_lo,
base + j * PCI_MSIX_ENTRY_SIZE + base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
writel(address.hi_address, writel(address_hi,
base + j * PCI_MSIX_ENTRY_SIZE + base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
writel(*(u32*)&data, writel(data,
base + j * PCI_MSIX_ENTRY_SIZE + base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_DATA_OFFSET); PCI_MSIX_ENTRY_DATA_OFFSET);
attach_msi_entry(entry, vector); attach_msi_entry(entry, vector);
...@@ -865,6 +913,7 @@ static int msix_capability_init(struct pci_dev *dev, ...@@ -865,6 +913,7 @@ static int msix_capability_init(struct pci_dev *dev,
**/ **/
int pci_enable_msi(struct pci_dev* dev) int pci_enable_msi(struct pci_dev* dev)
{ {
struct pci_bus *bus;
int pos, temp, status = -EINVAL; int pos, temp, status = -EINVAL;
u16 control; u16 control;
...@@ -874,7 +923,8 @@ int pci_enable_msi(struct pci_dev* dev) ...@@ -874,7 +923,8 @@ int pci_enable_msi(struct pci_dev* dev)
if (dev->no_msi) if (dev->no_msi)
return status; return status;
if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) for (bus = dev->bus; bus; bus = bus->parent)
if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
return -EINVAL; return -EINVAL;
temp = dev->irq; temp = dev->irq;
...@@ -887,23 +937,23 @@ int pci_enable_msi(struct pci_dev* dev) ...@@ -887,23 +937,23 @@ int pci_enable_msi(struct pci_dev* dev)
if (!pos) if (!pos)
return -EINVAL; return -EINVAL;
pci_read_config_word(dev, msi_control_reg(pos), &control);
if (control & PCI_MSI_FLAGS_ENABLE)
return 0; /* Already in MSI mode */
if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) { if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
/* Lookup Sucess */ /* Lookup Sucess */
unsigned long flags; unsigned long flags;
pci_read_config_word(dev, msi_control_reg(pos), &control);
if (control & PCI_MSI_FLAGS_ENABLE)
return 0; /* Already in MSI mode */
spin_lock_irqsave(&msi_lock, flags); spin_lock_irqsave(&msi_lock, flags);
if (!vector_irq[dev->irq]) { if (!vector_irq[dev->irq]) {
msi_desc[dev->irq]->msi_attrib.state = 0; msi_desc[dev->irq]->msi_attrib.state = 0;
vector_irq[dev->irq] = -1; vector_irq[dev->irq] = -1;
nr_released_vectors--; nr_released_vectors--;
spin_unlock_irqrestore(&msi_lock, flags); spin_unlock_irqrestore(&msi_lock, flags);
msi_register_init(dev, msi_desc[dev->irq]); status = msi_register_init(dev, msi_desc[dev->irq]);
if (status == 0)
enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
return 0; return status;
} }
spin_unlock_irqrestore(&msi_lock, flags); spin_unlock_irqrestore(&msi_lock, flags);
dev->irq = temp; dev->irq = temp;
...@@ -980,6 +1030,8 @@ static int msi_free_vector(struct pci_dev* dev, int vector, int reassign) ...@@ -980,6 +1030,8 @@ static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
void __iomem *base; void __iomem *base;
unsigned long flags; unsigned long flags;
msi_ops->teardown(vector);
spin_lock_irqsave(&msi_lock, flags); spin_lock_irqsave(&msi_lock, flags);
entry = msi_desc[vector]; entry = msi_desc[vector];
if (!entry || entry->dev != dev) { if (!entry || entry->dev != dev) {
...@@ -1008,34 +1060,9 @@ static int msi_free_vector(struct pci_dev* dev, int vector, int reassign) ...@@ -1008,34 +1060,9 @@ static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
entry_nr * PCI_MSIX_ENTRY_SIZE + entry_nr * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
if (head == vector) { if (head == vector)
/*
* Detect last MSI-X vector to be released.
* Release the MSI-X memory-mapped table.
*/
#if 0
int pos, nr_entries;
unsigned long phys_addr;
u32 table_offset;
u16 control;
u8 bir;
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
pci_read_config_word(dev, msi_control_reg(pos),
&control);
nr_entries = multi_msix_capable(control);
pci_read_config_dword(dev, msix_table_offset_reg(pos),
&table_offset);
bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
phys_addr = pci_resource_start(dev, bir) + table_offset;
/*
* FIXME! and what did you want to do with phys_addr?
*/
#endif
iounmap(base); iounmap(base);
} }
}
return 0; return 0;
} }
...@@ -1108,6 +1135,7 @@ static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec) ...@@ -1108,6 +1135,7 @@ static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
**/ **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{ {
struct pci_bus *bus;
int status, pos, nr_entries, free_vectors; int status, pos, nr_entries, free_vectors;
int i, j, temp; int i, j, temp;
u16 control; u16 control;
...@@ -1116,6 +1144,13 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) ...@@ -1116,6 +1144,13 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
if (!pci_msi_enable || !dev || !entries) if (!pci_msi_enable || !dev || !entries)
return -EINVAL; return -EINVAL;
if (dev->no_msi)
return -EINVAL;
for (bus = dev->bus; bus; bus = bus->parent)
if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
return -EINVAL;
status = msi_init(); status = msi_init();
if (status < 0) if (status < 0)
return status; return status;
...@@ -1300,24 +1335,6 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev) ...@@ -1300,24 +1335,6 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev)
} }
msi_free_vector(dev, vector, 0); msi_free_vector(dev, vector, 0);
if (warning) { if (warning) {
/* Force to release the MSI-X memory-mapped table */
#if 0
unsigned long phys_addr;
u32 table_offset;
u16 control;
u8 bir;
pci_read_config_word(dev, msi_control_reg(pos),
&control);
pci_read_config_dword(dev, msix_table_offset_reg(pos),
&table_offset);
bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
phys_addr = pci_resource_start(dev, bir) + table_offset;
/*
* FIXME! and what did you want to do with phys_addr?
*/
#endif
iounmap(base); iounmap(base);
printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() " printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
"called without free_irq() on all MSI-X vectors\n", "called without free_irq() on all MSI-X vectors\n",
......
...@@ -6,6 +6,68 @@ ...@@ -6,6 +6,68 @@
#ifndef MSI_H #ifndef MSI_H
#define MSI_H #define MSI_H
/*
* MSI operation vector. Used by the msi core code (drivers/pci/msi.c)
* to abstract platform-specific tasks relating to MSI address generation
* and resource management.
*/
struct msi_ops {
/**
* setup - generate an MSI bus address and data for a given vector
* @pdev: PCI device context (in)
* @vector: vector allocated by the msi core (in)
* @addr_hi: upper 32 bits of PCI bus MSI address (out)
* @addr_lo: lower 32 bits of PCI bus MSI address (out)
* @data: MSI data payload (out)
*
* Description: The setup op is used to generate a PCI bus addres and
* data which the msi core will program into the card MSI capability
* registers. The setup routine is responsible for picking an initial
* cpu to target the MSI at. The setup routine is responsible for
* examining pdev to determine the MSI capabilities of the card and
* generating a suitable address/data. The setup routine is
* responsible for allocating and tracking any system resources it
* needs to route the MSI to the cpu it picks, and for associating
* those resources with the passed in vector.
*
* Returns 0 if the MSI address/data was successfully setup.
**/
int (*setup) (struct pci_dev *pdev, unsigned int vector,
u32 *addr_hi, u32 *addr_lo, u32 *data);
/**
* teardown - release resources allocated by setup
* @vector: vector context for resources (in)
*
* Description: The teardown op is used to release any resources
* that were allocated in the setup routine associated with the passed
* in vector.
**/
void (*teardown) (unsigned int vector);
/**
* target - retarget an MSI at a different cpu
* @vector: vector context for resources (in)
* @cpu: new cpu to direct vector at (in)
* @addr_hi: new value of PCI bus upper 32 bits (in/out)
* @addr_lo: new value of PCI bus lower 32 bits (in/out)
*
* Description: The target op is used to redirect an MSI vector
* at a different cpu. addr_hi/addr_lo coming in are the existing
* values that the MSI core has programmed into the card. The
* target code is responsible for freeing any resources (if any)
* associated with the old address, and generating a new PCI bus
* addr_hi/addr_lo that will redirect the vector at the indicated cpu.
**/
void (*target) (unsigned int vector, unsigned int cpu,
u32 *addr_hi, u32 *addr_lo);
};
extern int msi_register(struct msi_ops *ops);
#include <asm/msi.h> #include <asm/msi.h>
/* /*
...@@ -63,67 +125,6 @@ extern int pci_vector_resources(int last, int nr_released); ...@@ -63,67 +125,6 @@ extern int pci_vector_resources(int last, int nr_released);
#define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK) #define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK)
#define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK) #define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK)
/*
* MSI Defined Data Structures
*/
#define MSI_ADDRESS_HEADER 0xfee
#define MSI_ADDRESS_HEADER_SHIFT 12
#define MSI_ADDRESS_HEADER_MASK 0xfff000
#define MSI_ADDRESS_DEST_ID_MASK 0xfff0000f
#define MSI_TARGET_CPU_MASK 0xff
#define MSI_DELIVERY_MODE 0
#define MSI_LEVEL_MODE 1 /* Edge always assert */
#define MSI_TRIGGER_MODE 0 /* MSI is edge sensitive */
#define MSI_PHYSICAL_MODE 0
#define MSI_LOGICAL_MODE 1
#define MSI_REDIRECTION_HINT_MODE 0
struct msg_data {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u32 vector : 8;
__u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */
__u32 reserved_1 : 3;
__u32 level : 1; /* 0: deassert | 1: assert */
__u32 trigger : 1; /* 0: edge | 1: level */
__u32 reserved_2 : 16;
#elif defined(__BIG_ENDIAN_BITFIELD)
__u32 reserved_2 : 16;
__u32 trigger : 1; /* 0: edge | 1: level */
__u32 level : 1; /* 0: deassert | 1: assert */
__u32 reserved_1 : 3;
__u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */
__u32 vector : 8;
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
} __attribute__ ((packed));
struct msg_address {
union {
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u32 reserved_1 : 2;
__u32 dest_mode : 1; /*0:physic | 1:logic */
__u32 redirection_hint: 1; /*0: dedicated CPU
1: lowest priority */
__u32 reserved_2 : 4;
__u32 dest_id : 24; /* Destination ID */
#elif defined(__BIG_ENDIAN_BITFIELD)
__u32 dest_id : 24; /* Destination ID */
__u32 reserved_2 : 4;
__u32 redirection_hint: 1; /*0: dedicated CPU
1: lowest priority */
__u32 dest_mode : 1; /*0:physic | 1:logic */
__u32 reserved_1 : 2;
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
}u;
__u32 value;
}lo_address;
__u32 hi_address;
} __attribute__ ((packed));
struct msi_desc { struct msi_desc {
struct { struct {
__u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */ __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */
...@@ -132,7 +133,7 @@ struct msi_desc { ...@@ -132,7 +133,7 @@ struct msi_desc {
__u8 reserved: 1; /* reserved */ __u8 reserved: 1; /* reserved */
__u8 entry_nr; /* specific enabled entry */ __u8 entry_nr; /* specific enabled entry */
__u8 default_vector; /* default pre-assigned vector */ __u8 default_vector; /* default pre-assigned vector */
__u8 current_cpu; /* current destination cpu */ __u8 unused; /* formerly unused destination cpu*/
}msi_attrib; }msi_attrib;
struct { struct {
...@@ -142,6 +143,14 @@ struct msi_desc { ...@@ -142,6 +143,14 @@ struct msi_desc {
void __iomem *mask_base; void __iomem *mask_base;
struct pci_dev *dev; struct pci_dev *dev;
#ifdef CONFIG_PM
/* PM save area for MSIX address/data */
u32 address_hi_save;
u32 address_lo_save;
u32 data_save;
#endif
}; };
#endif /* MSI_H */ #endif /* MSI_H */
...@@ -267,7 +267,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) ...@@ -267,7 +267,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
/* ACPI bus type */ /* ACPI bus type */
static int pci_acpi_find_device(struct device *dev, acpi_handle *handle) static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
{ {
struct pci_dev * pci_dev; struct pci_dev * pci_dev;
acpi_integer addr; acpi_integer addr;
...@@ -281,7 +281,7 @@ static int pci_acpi_find_device(struct device *dev, acpi_handle *handle) ...@@ -281,7 +281,7 @@ static int pci_acpi_find_device(struct device *dev, acpi_handle *handle)
return 0; return 0;
} }
static int pci_acpi_find_root_bridge(struct device *dev, acpi_handle *handle) static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle)
{ {
int num; int num;
unsigned int seg, bus; unsigned int seg, bus;
...@@ -299,21 +299,21 @@ static int pci_acpi_find_root_bridge(struct device *dev, acpi_handle *handle) ...@@ -299,21 +299,21 @@ static int pci_acpi_find_root_bridge(struct device *dev, acpi_handle *handle)
return 0; return 0;
} }
static struct acpi_bus_type pci_acpi_bus = { static struct acpi_bus_type acpi_pci_bus = {
.bus = &pci_bus_type, .bus = &pci_bus_type,
.find_device = pci_acpi_find_device, .find_device = acpi_pci_find_device,
.find_bridge = pci_acpi_find_root_bridge, .find_bridge = acpi_pci_find_root_bridge,
}; };
static int __init pci_acpi_init(void) static int __init acpi_pci_init(void)
{ {
int ret; int ret;
ret = register_acpi_bus_type(&pci_acpi_bus); ret = register_acpi_bus_type(&acpi_pci_bus);
if (ret) if (ret)
return 0; return 0;
platform_pci_choose_state = acpi_pci_choose_state; platform_pci_choose_state = acpi_pci_choose_state;
platform_pci_set_power_state = acpi_pci_set_power_state; platform_pci_set_power_state = acpi_pci_set_power_state;
return 0; return 0;
} }
arch_initcall(pci_acpi_init); arch_initcall(acpi_pci_init);
...@@ -43,6 +43,29 @@ pci_config_attr(subsystem_vendor, "0x%04x\n"); ...@@ -43,6 +43,29 @@ pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n"); pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(class, "0x%06x\n"); pci_config_attr(class, "0x%06x\n");
pci_config_attr(irq, "%u\n"); pci_config_attr(irq, "%u\n");
pci_config_attr(is_enabled, "%u\n");
static ssize_t broken_parity_status_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
return sprintf (buf, "%u\n", pdev->broken_parity_status);
}
static ssize_t broken_parity_status_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
ssize_t consumed = -EINVAL;
if ((count > 0) && (*buf == '0' || *buf == '1')) {
pdev->broken_parity_status = *buf == '1' ? 1 : 0;
consumed = count;
}
return consumed;
}
static ssize_t local_cpus_show(struct device *dev, static ssize_t local_cpus_show(struct device *dev,
struct device_attribute *attr, char *buf) struct device_attribute *attr, char *buf)
...@@ -90,6 +113,25 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, ...@@ -90,6 +113,25 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
(u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8), (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
(u8)(pci_dev->class)); (u8)(pci_dev->class));
} }
static ssize_t
is_enabled_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
/* this can crash the machine when done on the "wrong" device */
if (!capable(CAP_SYS_ADMIN))
return count;
if (*buf == '0')
pci_disable_device(pdev);
if (*buf == '1')
pci_enable_device(pdev);
return count;
}
struct device_attribute pci_dev_attrs[] = { struct device_attribute pci_dev_attrs[] = {
__ATTR_RO(resource), __ATTR_RO(resource),
...@@ -101,6 +143,9 @@ struct device_attribute pci_dev_attrs[] = { ...@@ -101,6 +143,9 @@ struct device_attribute pci_dev_attrs[] = {
__ATTR_RO(irq), __ATTR_RO(irq),
__ATTR_RO(local_cpus), __ATTR_RO(local_cpus),
__ATTR_RO(modalias), __ATTR_RO(modalias),
__ATTR(enable, 0600, is_enabled_show, is_enabled_store),
__ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
broken_parity_status_show,broken_parity_status_store),
__ATTR_NULL, __ATTR_NULL,
}; };
......
...@@ -517,7 +517,12 @@ pci_enable_device_bars(struct pci_dev *dev, int bars) ...@@ -517,7 +517,12 @@ pci_enable_device_bars(struct pci_dev *dev, int bars)
int int
pci_enable_device(struct pci_dev *dev) pci_enable_device(struct pci_dev *dev)
{ {
int err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1); int err;
if (dev->is_enabled)
return 0;
err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
if (err) if (err)
return err; return err;
pci_fixup_device(pci_fixup_enable, dev); pci_fixup_device(pci_fixup_enable, dev);
...@@ -547,6 +552,13 @@ pci_disable_device(struct pci_dev *dev) ...@@ -547,6 +552,13 @@ pci_disable_device(struct pci_dev *dev)
{ {
u16 pci_command; u16 pci_command;
if (dev->msi_enabled)
disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
PCI_CAP_ID_MSI);
if (dev->msix_enabled)
disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
PCI_CAP_ID_MSIX);
pci_read_config_word(dev, PCI_COMMAND, &pci_command); pci_read_config_word(dev, PCI_COMMAND, &pci_command);
if (pci_command & PCI_COMMAND_MASTER) { if (pci_command & PCI_COMMAND_MASTER) {
pci_command &= ~PCI_COMMAND_MASTER; pci_command &= ~PCI_COMMAND_MASTER;
......
...@@ -40,7 +40,7 @@ extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int ...@@ -40,7 +40,7 @@ extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int
extern void pci_remove_legacy_files(struct pci_bus *bus); extern void pci_remove_legacy_files(struct pci_bus *bus);
/* Lock for read/write access to pci device and bus lists */ /* Lock for read/write access to pci device and bus lists */
extern spinlock_t pci_bus_lock; extern struct rw_semaphore pci_bus_sem;
#ifdef CONFIG_X86_IO_APIC #ifdef CONFIG_X86_IO_APIC
extern int pci_msi_quirk; extern int pci_msi_quirk;
......
...@@ -180,25 +180,31 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) ...@@ -180,25 +180,31 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
res->flags |= pci_calc_resource_flags(l); res->flags |= pci_calc_resource_flags(l);
if ((l & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK)) if ((l & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK))
== (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64)) { == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64)) {
pci_read_config_dword(dev, reg+4, &l); u32 szhi, lhi;
pci_read_config_dword(dev, reg+4, &lhi);
pci_write_config_dword(dev, reg+4, ~0);
pci_read_config_dword(dev, reg+4, &szhi);
pci_write_config_dword(dev, reg+4, lhi);
szhi = pci_size(lhi, szhi, 0xffffffff);
next++; next++;
#if BITS_PER_LONG == 64 #if BITS_PER_LONG == 64
res->start |= ((unsigned long) l) << 32; res->start |= ((unsigned long) lhi) << 32;
res->end = res->start + sz; res->end = res->start + sz;
pci_write_config_dword(dev, reg+4, ~0); if (szhi) {
pci_read_config_dword(dev, reg+4, &sz);
pci_write_config_dword(dev, reg+4, l);
sz = pci_size(l, sz, 0xffffffff);
if (sz) {
/* This BAR needs > 4GB? Wow. */ /* This BAR needs > 4GB? Wow. */
res->end |= (unsigned long)sz<<32; res->end |= (unsigned long)szhi<<32;
} }
#else #else
if (l) { if (szhi) {
printk(KERN_ERR "PCI: Unable to handle 64-bit address for device %s\n", pci_name(dev)); printk(KERN_ERR "PCI: Unable to handle 64-bit BAR for device %s\n", pci_name(dev));
res->start = 0; res->start = 0;
res->flags = 0; res->flags = 0;
continue; } else if (lhi) {
/* 64-bit wide address, treat as disabled */
pci_write_config_dword(dev, reg, l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
pci_write_config_dword(dev, reg+4, 0);
res->start = 0;
res->end = sz;
} }
#endif #endif
} }
...@@ -377,9 +383,9 @@ struct pci_bus * __devinit pci_add_new_bus(struct pci_bus *parent, struct pci_de ...@@ -377,9 +383,9 @@ struct pci_bus * __devinit pci_add_new_bus(struct pci_bus *parent, struct pci_de
child = pci_alloc_child_bus(parent, dev, busnr); child = pci_alloc_child_bus(parent, dev, busnr);
if (child) { if (child) {
spin_lock(&pci_bus_lock); down_write(&pci_bus_sem);
list_add_tail(&child->node, &parent->children); list_add_tail(&child->node, &parent->children);
spin_unlock(&pci_bus_lock); up_write(&pci_bus_sem);
} }
return child; return child;
} }
...@@ -838,9 +844,9 @@ void __devinit pci_device_add(struct pci_dev *dev, struct pci_bus *bus) ...@@ -838,9 +844,9 @@ void __devinit pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
* and the bus list for fixup functions, etc. * and the bus list for fixup functions, etc.
*/ */
INIT_LIST_HEAD(&dev->global_list); INIT_LIST_HEAD(&dev->global_list);
spin_lock(&pci_bus_lock); down_write(&pci_bus_sem);
list_add_tail(&dev->bus_list, &bus->devices); list_add_tail(&dev->bus_list, &bus->devices);
spin_unlock(&pci_bus_lock); up_write(&pci_bus_sem);
} }
struct pci_dev * __devinit struct pci_dev * __devinit
...@@ -975,9 +981,10 @@ struct pci_bus * __devinit pci_create_bus(struct device *parent, ...@@ -975,9 +981,10 @@ struct pci_bus * __devinit pci_create_bus(struct device *parent,
pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus); pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus);
goto err_out; goto err_out;
} }
spin_lock(&pci_bus_lock);
down_write(&pci_bus_sem);
list_add_tail(&b->node, &pci_root_buses); list_add_tail(&b->node, &pci_root_buses);
spin_unlock(&pci_bus_lock); up_write(&pci_bus_sem);
memset(dev, 0, sizeof(*dev)); memset(dev, 0, sizeof(*dev));
dev->parent = parent; dev->parent = parent;
...@@ -1017,9 +1024,9 @@ struct pci_bus * __devinit pci_create_bus(struct device *parent, ...@@ -1017,9 +1024,9 @@ struct pci_bus * __devinit pci_create_bus(struct device *parent,
class_dev_reg_err: class_dev_reg_err:
device_unregister(dev); device_unregister(dev);
dev_reg_err: dev_reg_err:
spin_lock(&pci_bus_lock); down_write(&pci_bus_sem);
list_del(&b->node); list_del(&b->node);
spin_unlock(&pci_bus_lock); up_write(&pci_bus_sem);
err_out: err_out:
kfree(dev); kfree(dev);
kfree(b); kfree(b);
......
...@@ -24,6 +24,17 @@ ...@@ -24,6 +24,17 @@
#include <linux/acpi.h> #include <linux/acpi.h>
#include "pci.h" #include "pci.h"
/* The Mellanox Tavor device gives false positive parity errors
* Mark this device with a broken_parity_status, to allow
* PCI scanning code to "skip" this now blacklisted device.
*/
static void __devinit quirk_mellanox_tavor(struct pci_dev *dev)
{
dev->broken_parity_status = 1; /* This device gives false positives */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
/* Deal with broken BIOS'es that neglect to enable passive release, /* Deal with broken BIOS'es that neglect to enable passive release,
which can cause problems in combination with the 82441FX/PPro MTRRs */ which can cause problems in combination with the 82441FX/PPro MTRRs */
static void __devinit quirk_passive_release(struct pci_dev *dev) static void __devinit quirk_passive_release(struct pci_dev *dev)
...@@ -878,27 +889,30 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e ...@@ -878,27 +889,30 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e
* when a PCI-Soundcard is added. The BIOS only gives Options * when a PCI-Soundcard is added. The BIOS only gives Options
* "Disabled" and "AUTO". This Quirk Sets the corresponding * "Disabled" and "AUTO". This Quirk Sets the corresponding
* Register-Value to enable the Soundcard. * Register-Value to enable the Soundcard.
*
* FIXME: Presently this quirk will run on anything that has an 8237
* which isn't correct, we need to check DMI tables or something in
* order to make sure it only runs on the MSI-K8T-Neo2Fir. Because it
* runs everywhere at present we suppress the printk output in most
* irrelevant cases.
*/ */
static void __init k8t_sound_hostbridge(struct pci_dev *dev) static void __init k8t_sound_hostbridge(struct pci_dev *dev)
{ {
unsigned char val; unsigned char val;
printk(KERN_INFO "PCI: Quirk-MSI-K8T Soundcard On\n");
pci_read_config_byte(dev, 0x50, &val); pci_read_config_byte(dev, 0x50, &val);
if (val == 0x88 || val == 0xc8) { if (val == 0x88 || val == 0xc8) {
/* Assume it's probably a MSI-K8T-Neo2Fir */
printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, attempting to turn soundcard ON\n");
pci_write_config_byte(dev, 0x50, val & (~0x40)); pci_write_config_byte(dev, 0x50, val & (~0x40));
/* Verify the Change for Status output */ /* Verify the Change for Status output */
pci_read_config_byte(dev, 0x50, &val); pci_read_config_byte(dev, 0x50, &val);
if (val & 0x40) if (val & 0x40)
printk(KERN_INFO "PCI: MSI-K8T soundcard still off\n"); printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, soundcard still off\n");
else else
printk(KERN_INFO "PCI: MSI-K8T soundcard on\n"); printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, soundcard on\n");
} else {
printk(KERN_INFO "PCI: Unexpected Value in PCI-Register: "
"no Change!\n");
} }
} }
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge);
...@@ -1485,6 +1499,25 @@ static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) ...@@ -1485,6 +1499,25 @@ static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
} }
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
/* Under some circumstances, AER is not linked with extended capabilities.
* Force it to be linked by setting the corresponding control bit in the
* config space.
*/
static void __devinit quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
{
uint8_t b;
if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
if (!(b & 0x20)) {
pci_write_config_byte(dev, 0xf41, b | 0x20);
printk(KERN_INFO
"PCI: Linking AER extended capability on %s\n",
pci_name(dev));
}
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_pcie_aer_ext_cap);
EXPORT_SYMBOL(pcie_mch_quirk); EXPORT_SYMBOL(pcie_mch_quirk);
#ifdef CONFIG_HOTPLUG #ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pci_fixup_device); EXPORT_SYMBOL(pci_fixup_device);
......
...@@ -22,18 +22,18 @@ static void pci_destroy_dev(struct pci_dev *dev) ...@@ -22,18 +22,18 @@ static void pci_destroy_dev(struct pci_dev *dev)
pci_proc_detach_device(dev); pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev); pci_remove_sysfs_dev_files(dev);
device_unregister(&dev->dev); device_unregister(&dev->dev);
spin_lock(&pci_bus_lock); down_write(&pci_bus_sem);
list_del(&dev->global_list); list_del(&dev->global_list);
dev->global_list.next = dev->global_list.prev = NULL; dev->global_list.next = dev->global_list.prev = NULL;
spin_unlock(&pci_bus_lock); up_write(&pci_bus_sem);
} }
/* Remove the device from the device lists, and prevent any further /* Remove the device from the device lists, and prevent any further
* list accesses from this device */ * list accesses from this device */
spin_lock(&pci_bus_lock); down_write(&pci_bus_sem);
list_del(&dev->bus_list); list_del(&dev->bus_list);
dev->bus_list.next = dev->bus_list.prev = NULL; dev->bus_list.next = dev->bus_list.prev = NULL;
spin_unlock(&pci_bus_lock); up_write(&pci_bus_sem);
pci_free_resources(dev); pci_free_resources(dev);
pci_dev_put(dev); pci_dev_put(dev);
...@@ -62,9 +62,9 @@ void pci_remove_bus(struct pci_bus *pci_bus) ...@@ -62,9 +62,9 @@ void pci_remove_bus(struct pci_bus *pci_bus)
{ {
pci_proc_detach_bus(pci_bus); pci_proc_detach_bus(pci_bus);
spin_lock(&pci_bus_lock); down_write(&pci_bus_sem);
list_del(&pci_bus->node); list_del(&pci_bus->node);
spin_unlock(&pci_bus_lock); up_write(&pci_bus_sem);
pci_remove_legacy_files(pci_bus); pci_remove_legacy_files(pci_bus);
class_device_remove_file(&pci_bus->class_dev, class_device_remove_file(&pci_bus->class_dev,
&class_device_attr_cpuaffinity); &class_device_attr_cpuaffinity);
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include "pci.h" #include "pci.h"
DEFINE_SPINLOCK(pci_bus_lock); DECLARE_RWSEM(pci_bus_sem);
static struct pci_bus * __devinit static struct pci_bus * __devinit
pci_do_find_bus(struct pci_bus* bus, unsigned char busnr) pci_do_find_bus(struct pci_bus* bus, unsigned char busnr)
...@@ -72,11 +72,11 @@ pci_find_next_bus(const struct pci_bus *from) ...@@ -72,11 +72,11 @@ pci_find_next_bus(const struct pci_bus *from)
struct pci_bus *b = NULL; struct pci_bus *b = NULL;
WARN_ON(in_interrupt()); WARN_ON(in_interrupt());
spin_lock(&pci_bus_lock); down_read(&pci_bus_sem);
n = from ? from->node.next : pci_root_buses.next; n = from ? from->node.next : pci_root_buses.next;
if (n != &pci_root_buses) if (n != &pci_root_buses)
b = pci_bus_b(n); b = pci_bus_b(n);
spin_unlock(&pci_bus_lock); up_read(&pci_bus_sem);
return b; return b;
} }
...@@ -124,7 +124,7 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn) ...@@ -124,7 +124,7 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn)
struct pci_dev *dev; struct pci_dev *dev;
WARN_ON(in_interrupt()); WARN_ON(in_interrupt());
spin_lock(&pci_bus_lock); down_read(&pci_bus_sem);
list_for_each(tmp, &bus->devices) { list_for_each(tmp, &bus->devices) {
dev = pci_dev_b(tmp); dev = pci_dev_b(tmp);
...@@ -135,7 +135,7 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn) ...@@ -135,7 +135,7 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn)
dev = NULL; dev = NULL;
out: out:
pci_dev_get(dev); pci_dev_get(dev);
spin_unlock(&pci_bus_lock); up_read(&pci_bus_sem);
return dev; return dev;
} }
...@@ -167,7 +167,7 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor, ...@@ -167,7 +167,7 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor,
struct pci_dev *dev; struct pci_dev *dev;
WARN_ON(in_interrupt()); WARN_ON(in_interrupt());
spin_lock(&pci_bus_lock); down_read(&pci_bus_sem);
n = from ? from->global_list.next : pci_devices.next; n = from ? from->global_list.next : pci_devices.next;
while (n && (n != &pci_devices)) { while (n && (n != &pci_devices)) {
...@@ -181,7 +181,7 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor, ...@@ -181,7 +181,7 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor,
} }
dev = NULL; dev = NULL;
exit: exit:
spin_unlock(&pci_bus_lock); up_read(&pci_bus_sem);
return dev; return dev;
} }
...@@ -232,7 +232,7 @@ pci_get_subsys(unsigned int vendor, unsigned int device, ...@@ -232,7 +232,7 @@ pci_get_subsys(unsigned int vendor, unsigned int device,
struct pci_dev *dev; struct pci_dev *dev;
WARN_ON(in_interrupt()); WARN_ON(in_interrupt());
spin_lock(&pci_bus_lock); down_read(&pci_bus_sem);
n = from ? from->global_list.next : pci_devices.next; n = from ? from->global_list.next : pci_devices.next;
while (n && (n != &pci_devices)) { while (n && (n != &pci_devices)) {
...@@ -247,7 +247,7 @@ pci_get_subsys(unsigned int vendor, unsigned int device, ...@@ -247,7 +247,7 @@ pci_get_subsys(unsigned int vendor, unsigned int device,
dev = NULL; dev = NULL;
exit: exit:
dev = pci_dev_get(dev); dev = pci_dev_get(dev);
spin_unlock(&pci_bus_lock); up_read(&pci_bus_sem);
pci_dev_put(from); pci_dev_put(from);
return dev; return dev;
} }
...@@ -292,7 +292,7 @@ pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct p ...@@ -292,7 +292,7 @@ pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct p
struct pci_dev *dev; struct pci_dev *dev;
WARN_ON(in_interrupt()); WARN_ON(in_interrupt());
spin_lock(&pci_bus_lock); down_read(&pci_bus_sem);
n = from ? from->global_list.prev : pci_devices.prev; n = from ? from->global_list.prev : pci_devices.prev;
while (n && (n != &pci_devices)) { while (n && (n != &pci_devices)) {
...@@ -304,7 +304,7 @@ pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct p ...@@ -304,7 +304,7 @@ pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct p
} }
dev = NULL; dev = NULL;
exit: exit:
spin_unlock(&pci_bus_lock); up_read(&pci_bus_sem);
return dev; return dev;
} }
...@@ -328,7 +328,7 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) ...@@ -328,7 +328,7 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
struct pci_dev *dev; struct pci_dev *dev;
WARN_ON(in_interrupt()); WARN_ON(in_interrupt());
spin_lock(&pci_bus_lock); down_read(&pci_bus_sem);
n = from ? from->global_list.next : pci_devices.next; n = from ? from->global_list.next : pci_devices.next;
while (n && (n != &pci_devices)) { while (n && (n != &pci_devices)) {
...@@ -340,7 +340,7 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) ...@@ -340,7 +340,7 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
dev = NULL; dev = NULL;
exit: exit:
dev = pci_dev_get(dev); dev = pci_dev_get(dev);
spin_unlock(&pci_bus_lock); up_read(&pci_bus_sem);
pci_dev_put(from); pci_dev_put(from);
return dev; return dev;
} }
...@@ -362,7 +362,7 @@ int pci_dev_present(const struct pci_device_id *ids) ...@@ -362,7 +362,7 @@ int pci_dev_present(const struct pci_device_id *ids)
int found = 0; int found = 0;
WARN_ON(in_interrupt()); WARN_ON(in_interrupt());
spin_lock(&pci_bus_lock); down_read(&pci_bus_sem);
while (ids->vendor || ids->subvendor || ids->class_mask) { while (ids->vendor || ids->subvendor || ids->class_mask) {
list_for_each_entry(dev, &pci_devices, global_list) { list_for_each_entry(dev, &pci_devices, global_list) {
if (pci_match_one_device(ids, dev)) { if (pci_match_one_device(ids, dev)) {
...@@ -373,7 +373,7 @@ int pci_dev_present(const struct pci_device_id *ids) ...@@ -373,7 +373,7 @@ int pci_dev_present(const struct pci_device_id *ids)
ids++; ids++;
} }
exit: exit:
spin_unlock(&pci_bus_lock); up_read(&pci_bus_sem);
return found; return found;
} }
EXPORT_SYMBOL(pci_dev_present); EXPORT_SYMBOL(pci_dev_present);
......
...@@ -55,9 +55,10 @@ pbus_assign_resources_sorted(struct pci_bus *bus) ...@@ -55,9 +55,10 @@ pbus_assign_resources_sorted(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) { list_for_each_entry(dev, &bus->devices, bus_list) {
u16 class = dev->class >> 8; u16 class = dev->class >> 8;
/* Don't touch classless devices and host bridges. */ /* Don't touch classless devices or host bridges or ioapics. */
if (class == PCI_CLASS_NOT_DEFINED || if (class == PCI_CLASS_NOT_DEFINED ||
class == PCI_CLASS_BRIDGE_HOST) class == PCI_CLASS_BRIDGE_HOST ||
class == PCI_CLASS_SYSTEM_PIC)
continue; continue;
pdev_sort_resources(dev, &head); pdev_sort_resources(dev, &head);
......
...@@ -155,6 +155,46 @@ int pci_assign_resource(struct pci_dev *dev, int resno) ...@@ -155,6 +155,46 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
return ret; return ret;
} }
#ifdef CONFIG_EMBEDDED
int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
{
struct pci_bus *bus = dev->bus;
struct resource *res = dev->resource + resno;
unsigned int type_mask;
int i, ret = -EBUSY;
type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH;
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
struct resource *r = bus->resource[i];
if (!r)
continue;
/* type_mask must match */
if ((res->flags ^ r->flags) & type_mask)
continue;
ret = request_resource(r, res);
if (ret == 0)
break;
}
if (ret) {
printk(KERN_ERR "PCI: Failed to allocate %s resource "
"#%d:%llx@%llx for %s\n",
res->flags & IORESOURCE_IO ? "I/O" : "mem",
resno, (unsigned long long)(res->end - res->start + 1),
(unsigned long long)res->start, pci_name(dev));
} else if (resno < PCI_BRIDGE_RESOURCES) {
pci_update_resource(dev, res, resno);
}
return ret;
}
EXPORT_SYMBOL_GPL(pci_assign_resource_fixed);
#endif
/* Sort resources by alignment */ /* Sort resources by alignment */
void __devinit void __devinit
pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
......
...@@ -397,30 +397,6 @@ ...@@ -397,30 +397,6 @@
#include "ql1280_fw.h" #include "ql1280_fw.h"
#include "ql1040_fw.h" #include "ql1040_fw.h"
/*
* Missing PCI ID's
*/
#ifndef PCI_DEVICE_ID_QLOGIC_ISP1080
#define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080
#endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP1240
#define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240
#endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP1280
#define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280
#endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP10160
#define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016
#endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP12160
#define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216
#endif
#ifndef PCI_VENDOR_ID_AMI
#define PCI_VENDOR_ID_AMI 0x101e
#endif
#ifndef BITS_PER_LONG #ifndef BITS_PER_LONG
#error "BITS_PER_LONG not defined!" #error "BITS_PER_LONG not defined!"
#endif #endif
......
...@@ -433,13 +433,14 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d ...@@ -433,13 +433,14 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
/* /*
* 0x1725/0x7174 is the Vitesse VSC-7174 * Intel 31244 is supposed to be identical.
* 0x8086/0x3200 is the Intel 31244, which is supposed to be identical * Compatibility is untested as of yet.
* compatibility is untested as of yet
*/ */
static const struct pci_device_id vsc_sata_pci_tbl[] = { static const struct pci_device_id vsc_sata_pci_tbl[] = {
{ 0x1725, 0x7174, PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, { PCI_VENDOR_ID_VITESSE, PCI_DEVICE_ID_VITESSE_VSC7174,
{ 0x8086, 0x3200, PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GD31244,
PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
{ } { }
}; };
......
...@@ -9,7 +9,15 @@ ...@@ -9,7 +9,15 @@
#include <asm/desc.h> #include <asm/desc.h>
#include <mach_apic.h> #include <mach_apic.h>
#define LAST_DEVICE_VECTOR 232 #define LAST_DEVICE_VECTOR (FIRST_SYSTEM_VECTOR - 1)
#define MSI_TARGET_CPU_SHIFT 12 #define MSI_TARGET_CPU_SHIFT 12
extern struct msi_ops msi_apic_ops;
static inline int msi_arch_init(void)
{
msi_register(&msi_apic_ops);
return 0;
}
#endif /* ASM_MSI_H */ #endif /* ASM_MSI_H */
...@@ -47,9 +47,19 @@ typedef u8 ia64_vector; ...@@ -47,9 +47,19 @@ typedef u8 ia64_vector;
#define IA64_CMC_VECTOR 0x1f /* corrected machine-check interrupt vector */ #define IA64_CMC_VECTOR 0x1f /* corrected machine-check interrupt vector */
/* /*
* Vectors 0x20-0x2f are reserved for legacy ISA IRQs. * Vectors 0x20-0x2f are reserved for legacy ISA IRQs.
* Use vectors 0x30-0xe7 as the default device vector range for ia64.
* Platforms may choose to reduce this range in platform_irq_setup, but the
* platform range must fall within
* [IA64_DEF_FIRST_DEVICE_VECTOR..IA64_DEF_LAST_DEVICE_VECTOR]
*/ */
#define IA64_FIRST_DEVICE_VECTOR 0x30 extern int ia64_first_device_vector;
#define IA64_LAST_DEVICE_VECTOR 0xe7 extern int ia64_last_device_vector;
#define IA64_DEF_FIRST_DEVICE_VECTOR 0x30
#define IA64_DEF_LAST_DEVICE_VECTOR 0xe7
#define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector
#define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector
#define IA64_MAX_DEVICE_VECTORS (IA64_DEF_LAST_DEVICE_VECTOR - IA64_DEF_FIRST_DEVICE_VECTOR + 1)
#define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1) #define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1)
#define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */ #define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */
...@@ -83,6 +93,7 @@ extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt ...@@ -83,6 +93,7 @@ extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt
extern int assign_irq_vector (int irq); /* allocate a free vector */ extern int assign_irq_vector (int irq); /* allocate a free vector */
extern void free_irq_vector (int vector); extern void free_irq_vector (int vector);
extern int reserve_irq_vector (int vector);
extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
extern void register_percpu_irq (ia64_vector vec, struct irqaction *action); extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
......
...@@ -75,6 +75,7 @@ typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *); ...@@ -75,6 +75,7 @@ typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *); typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *); typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *); typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
typedef int ia64_mv_msi_init_t (void);
static inline void static inline void
machvec_noop (void) machvec_noop (void)
...@@ -153,6 +154,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); ...@@ -153,6 +154,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# define platform_readl_relaxed ia64_mv.readl_relaxed # define platform_readl_relaxed ia64_mv.readl_relaxed
# define platform_readq_relaxed ia64_mv.readq_relaxed # define platform_readq_relaxed ia64_mv.readq_relaxed
# define platform_migrate ia64_mv.migrate # define platform_migrate ia64_mv.migrate
# define platform_msi_init ia64_mv.msi_init
# endif # endif
/* __attribute__((__aligned__(16))) is required to make size of the /* __attribute__((__aligned__(16))) is required to make size of the
...@@ -202,6 +204,7 @@ struct ia64_machine_vector { ...@@ -202,6 +204,7 @@ struct ia64_machine_vector {
ia64_mv_readl_relaxed_t *readl_relaxed; ia64_mv_readl_relaxed_t *readl_relaxed;
ia64_mv_readq_relaxed_t *readq_relaxed; ia64_mv_readq_relaxed_t *readq_relaxed;
ia64_mv_migrate_t *migrate; ia64_mv_migrate_t *migrate;
ia64_mv_msi_init_t *msi_init;
} __attribute__((__aligned__(16))); /* align attrib? see above comment */ } __attribute__((__aligned__(16))); /* align attrib? see above comment */
#define MACHVEC_INIT(name) \ #define MACHVEC_INIT(name) \
...@@ -247,6 +250,7 @@ struct ia64_machine_vector { ...@@ -247,6 +250,7 @@ struct ia64_machine_vector {
platform_readl_relaxed, \ platform_readl_relaxed, \
platform_readq_relaxed, \ platform_readq_relaxed, \
platform_migrate, \ platform_migrate, \
platform_msi_init, \
} }
extern struct ia64_machine_vector ia64_mv; extern struct ia64_machine_vector ia64_mv;
...@@ -400,5 +404,8 @@ extern int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size ...@@ -400,5 +404,8 @@ extern int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size
#ifndef platform_migrate #ifndef platform_migrate
# define platform_migrate machvec_noop_task # define platform_migrate machvec_noop_task
#endif #endif
#ifndef platform_msi_init
# define platform_msi_init ((ia64_mv_msi_init_t*)NULL)
#endif
#endif /* _ASM_IA64_MACHVEC_H */ #endif /* _ASM_IA64_MACHVEC_H */
...@@ -67,6 +67,8 @@ extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device; ...@@ -67,6 +67,8 @@ extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
extern ia64_mv_dma_mapping_error sn_dma_mapping_error; extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
extern ia64_mv_dma_supported sn_dma_supported; extern ia64_mv_dma_supported sn_dma_supported;
extern ia64_mv_migrate_t sn_migrate; extern ia64_mv_migrate_t sn_migrate;
extern ia64_mv_msi_init_t sn_msi_init;
/* /*
* This stuff has dual use! * This stuff has dual use!
...@@ -117,6 +119,11 @@ extern ia64_mv_migrate_t sn_migrate; ...@@ -117,6 +119,11 @@ extern ia64_mv_migrate_t sn_migrate;
#define platform_dma_mapping_error sn_dma_mapping_error #define platform_dma_mapping_error sn_dma_mapping_error
#define platform_dma_supported sn_dma_supported #define platform_dma_supported sn_dma_supported
#define platform_migrate sn_migrate #define platform_migrate sn_migrate
#ifdef CONFIG_PCI_MSI
#define platform_msi_init sn_msi_init
#else
#define platform_msi_init ((ia64_mv_msi_init_t*)NULL)
#endif
#include <asm/sn/io.h> #include <asm/sn/io.h>
......
...@@ -14,4 +14,16 @@ static inline void set_intr_gate (int nr, void *func) {} ...@@ -14,4 +14,16 @@ static inline void set_intr_gate (int nr, void *func) {}
#define ack_APIC_irq ia64_eoi #define ack_APIC_irq ia64_eoi
#define MSI_TARGET_CPU_SHIFT 4 #define MSI_TARGET_CPU_SHIFT 4
extern struct msi_ops msi_apic_ops;
static inline int msi_arch_init(void)
{
if (platform_msi_init)
return platform_msi_init();
/* default ops for most ia64 platforms */
msi_register(&msi_apic_ops);
return 0;
}
#endif /* ASM_MSI_H */ #endif /* ASM_MSI_H */
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#define _ASM_IA64_SN_INTR_H #define _ASM_IA64_SN_INTR_H
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <asm/sn/types.h>
#define SGI_UART_VECTOR 0xe9 #define SGI_UART_VECTOR 0xe9
...@@ -40,6 +41,7 @@ struct sn_irq_info { ...@@ -40,6 +41,7 @@ struct sn_irq_info {
int irq_cpuid; /* kernel logical cpuid */ int irq_cpuid; /* kernel logical cpuid */
int irq_irq; /* the IRQ number */ int irq_irq; /* the IRQ number */
int irq_int_bit; /* Bridge interrupt pin */ int irq_int_bit; /* Bridge interrupt pin */
/* <0 means MSI */
u64 irq_xtalkaddr; /* xtalkaddr IRQ is sent to */ u64 irq_xtalkaddr; /* xtalkaddr IRQ is sent to */
int irq_bridge_type;/* pciio asic type (pciio.h) */ int irq_bridge_type;/* pciio asic type (pciio.h) */
void *irq_bridge; /* bridge generating irq */ void *irq_bridge; /* bridge generating irq */
...@@ -53,6 +55,12 @@ struct sn_irq_info { ...@@ -53,6 +55,12 @@ struct sn_irq_info {
}; };
extern void sn_send_IPI_phys(int, long, int, int); extern void sn_send_IPI_phys(int, long, int, int);
extern u64 sn_intr_alloc(nasid_t, int,
struct sn_irq_info *,
int, nasid_t, int);
extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
extern struct list_head **sn_irq_lh;
#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector) #define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
......
...@@ -55,6 +55,7 @@ ...@@ -55,6 +55,7 @@
#define PCI32_ATE_V (0x1 << 0) #define PCI32_ATE_V (0x1 << 0)
#define PCI32_ATE_CO (0x1 << 1) #define PCI32_ATE_CO (0x1 << 1)
#define PCI32_ATE_PREC (0x1 << 2) #define PCI32_ATE_PREC (0x1 << 2)
#define PCI32_ATE_MSI (0x1 << 2)
#define PCI32_ATE_PREF (0x1 << 3) #define PCI32_ATE_PREF (0x1 << 3)
#define PCI32_ATE_BAR (0x1 << 4) #define PCI32_ATE_BAR (0x1 << 4)
#define PCI32_ATE_ADDR_SHFT 12 #define PCI32_ATE_ADDR_SHFT 12
...@@ -117,8 +118,8 @@ struct pcibus_info { ...@@ -117,8 +118,8 @@ struct pcibus_info {
extern int pcibr_init_provider(void); extern int pcibr_init_provider(void);
extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *); extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *);
extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t); extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type);
extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t); extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type);
extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int); extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
/* /*
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/ */
#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H #ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H #define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
...@@ -45,13 +45,24 @@ struct pci_controller; ...@@ -45,13 +45,24 @@ struct pci_controller;
*/ */
struct sn_pcibus_provider { struct sn_pcibus_provider {
dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t); dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t, int flags);
dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t); dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags);
void (*dma_unmap)(struct pci_dev *, dma_addr_t, int); void (*dma_unmap)(struct pci_dev *, dma_addr_t, int);
void * (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *); void * (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
void (*force_interrupt)(struct sn_irq_info *); void (*force_interrupt)(struct sn_irq_info *);
void (*target_interrupt)(struct sn_irq_info *); void (*target_interrupt)(struct sn_irq_info *);
}; };
/*
* Flags used by the map interfaces
* bits 3:0 specifies format of passed in address
* bit 4 specifies that address is to be used for MSI
*/
#define SN_DMA_ADDRTYPE(x) ((x) & 0xf)
#define SN_DMA_ADDR_PHYS 1 /* address is an xio address. */
#define SN_DMA_ADDR_XIO 2 /* address is phys memory */
#define SN_DMA_MSI 0x10 /* Bus address is to be used for MSI */
extern struct sn_pcibus_provider *sn_pci_provider[]; extern struct sn_pcibus_provider *sn_pci_provider[];
#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */ #endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
...@@ -3,13 +3,14 @@ ...@@ -3,13 +3,14 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2003-2004 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 2003-2005 Silicon Graphics, Inc. All rights reserved.
*/ */
#ifndef _ASM_IA64_SN_PCI_TIOCP_H #ifndef _ASM_IA64_SN_PCI_TIOCP_H
#define _ASM_IA64_SN_PCI_TIOCP_H #define _ASM_IA64_SN_PCI_TIOCP_H
#define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL #define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL
#define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60) #define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60)
#define TIOCP_PCI64_CMDTYPE_MSI (0x3ull << 60)
/***************************************************************************** /*****************************************************************************
......
...@@ -10,7 +10,15 @@ ...@@ -10,7 +10,15 @@
#include <asm/mach_apic.h> #include <asm/mach_apic.h>
#include <asm/smp.h> #include <asm/smp.h>
#define LAST_DEVICE_VECTOR 232 #define LAST_DEVICE_VECTOR (FIRST_SYSTEM_VECTOR - 1)
#define MSI_TARGET_CPU_SHIFT 12 #define MSI_TARGET_CPU_SHIFT 12
extern struct msi_ops msi_apic_ops;
static inline int msi_arch_init(void)
{
msi_register(&msi_apic_ops);
return 0;
}
#endif /* ASM_MSI_H */ #endif /* ASM_MSI_H */
...@@ -162,6 +162,9 @@ struct pci_dev { ...@@ -162,6 +162,9 @@ struct pci_dev {
unsigned int is_busmaster:1; /* device is busmaster */ unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */ unsigned int no_msi:1; /* device may not use msi */
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
u32 saved_config_space[16]; /* config space saved at suspend time */ u32 saved_config_space[16]; /* config space saved at suspend time */
struct hlist_head saved_cap_space; struct hlist_head saved_cap_space;
...@@ -496,6 +499,7 @@ int pci_set_dma_mask(struct pci_dev *dev, u64 mask); ...@@ -496,6 +499,7 @@ int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask);
void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
int pci_assign_resource(struct pci_dev *dev, int i); int pci_assign_resource(struct pci_dev *dev, int i);
int pci_assign_resource_fixed(struct pci_dev *dev, int i);
void pci_restore_bars(struct pci_dev *dev); void pci_restore_bars(struct pci_dev *dev);
/* ROM control related routines */ /* ROM control related routines */
......
...@@ -848,7 +848,12 @@ ...@@ -848,7 +848,12 @@
#define PCI_VENDOR_ID_QLOGIC 0x1077 #define PCI_VENDOR_ID_QLOGIC 0x1077
#define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016
#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020 #define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020
#define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080
#define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216
#define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240
#define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280
#define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100 #define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100
#define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200 #define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200
#define PCI_DEVICE_ID_QLOGIC_ISP2300 0x2300 #define PCI_DEVICE_ID_QLOGIC_ISP2300 0x2300
...@@ -1018,6 +1023,7 @@ ...@@ -1018,6 +1023,7 @@
#define PCI_DEVICE_ID_NVIDIA_NVENET_8 0x0056 #define PCI_DEVICE_ID_NVIDIA_NVENET_8 0x0056
#define PCI_DEVICE_ID_NVIDIA_NVENET_9 0x0057 #define PCI_DEVICE_ID_NVIDIA_NVENET_9 0x0057
#define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059 #define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059
#define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065
#define PCI_DEVICE_ID_NVIDIA_NVENET_2 0x0066 #define PCI_DEVICE_ID_NVIDIA_NVENET_2 0x0066
...@@ -1946,6 +1952,7 @@ ...@@ -1946,6 +1952,7 @@
#define PCI_VENDOR_ID_MELLANOX 0x15b3 #define PCI_VENDOR_ID_MELLANOX 0x15b3
#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 #define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44
#define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46
#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 #define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278
#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 #define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282
#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c #define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
...@@ -1969,6 +1976,9 @@ ...@@ -1969,6 +1976,9 @@
#define PCI_VENDOR_ID_NETCELL 0x169c #define PCI_VENDOR_ID_NETCELL 0x169c
#define PCI_DEVICE_ID_REVOLUTION 0x0044 #define PCI_DEVICE_ID_REVOLUTION 0x0044
#define PCI_VENDOR_ID_VITESSE 0x1725
#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174
#define PCI_VENDOR_ID_LINKSYS 0x1737 #define PCI_VENDOR_ID_LINKSYS 0x1737
#define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064 #define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064
...@@ -2148,6 +2158,7 @@ ...@@ -2148,6 +2158,7 @@
#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 #define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e #define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 #define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
#define PCI_DEVICE_ID_INTEL_GD31244 0x3200
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment