Commit 883eed1b authored by Jesse Barnes, committed by Jesse Barnes

Merge branch 'pci-for-jesse' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip into for-linus

parents 5e70b7f3 45aec1ae
@@ -36,6 +36,7 @@ files, each with their own function.
        local_cpus                nearby CPU mask (cpumask, ro)
        resource                  PCI resource host addresses (ascii, ro)
        resource0..N              PCI resource N, if present (binary, mmap)
+       resource0_wc..N_wc        PCI WC map resource N, if prefetchable (binary, mmap)
        rom                       PCI ROM resource, if present (binary, ro)
        subsystem_device          PCI subsystem device (ascii, ro)
        subsystem_vendor          PCI subsystem vendor (ascii, ro)
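For context on how the new resource0_wc..N_wc files are meant to be consumed, here is a minimal userspace sketch (not part of this commit; the device address 0000:00:02.0 and the choice of BAR 0 are placeholders). It maps a prefetchable BAR through the _wc file so that CPU stores to the region can be write-combined; as with the existing resourceN files, the size reported by fstat() is the BAR length, so the whole BAR can be mapped in one call.

/*
 * Hypothetical userspace example: map BAR 0 of device 0000:00:02.0
 * through its resource0_wc sysfs file.  Error handling is kept terse.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/bus/pci/devices/0000:00:02.0/resource0_wc";
        struct stat st;
        volatile unsigned int *bar;
        int fd;

        fd = open(path, O_RDWR);
        if (fd < 0 || fstat(fd, &st) < 0) {
                perror(path);
                return 1;
        }

        /* the file's size is the BAR length; map the whole thing */
        bar = mmap(NULL, st.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (bar == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        bar[0] = 0xdeadbeef;    /* example MMIO store, now write-combinable */

        munmap((void *)bar, st.st_size);
        close(fd);
        return 0;
}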
@@ -378,6 +378,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
        struct page *page;
        unsigned long dma_mask = 0;
        dma_addr_t bus;
+       int noretry = 0;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -397,20 +398,25 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
        if (dev->dma_mask == NULL)
                return NULL;

-       /* Don't invoke OOM killer */
-       gfp |= __GFP_NORETRY;
+       /* Don't invoke OOM killer or retry in lower 16MB DMA zone */
+       if (gfp & __GFP_DMA)
+               noretry = 1;

 #ifdef CONFIG_X86_64
        /* Why <=? Even when the mask is smaller than 4GB it is often
           larger than 16MB and in this case we have a chance of
           finding fitting memory in the next higher zone first. If
           not retry with true GFP_DMA. -AK */
-       if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+       if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                gfp |= GFP_DMA32;
+               if (dma_mask < DMA_32BIT_MASK)
+                       noretry = 1;
+       }
 #endif

  again:
-       page = dma_alloc_pages(dev, gfp, get_order(size));
+       page = dma_alloc_pages(dev,
+                       noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
        if (page == NULL)
                return NULL;
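To illustrate who this dma_alloc_coherent() change affects, a hedged driver-side sketch follows (the helper name and device are made up, not from this commit): a device with a small coherent mask is the case that may still end up in the 16MB GFP_DMA zone and therefore keeps __GFP_NORETRY, while a device with a full 32-bit mask is no longer forced to skip retry/reclaim on every coherent allocation.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical helper: allocate a coherent buffer for a device that can
 * only address the low 16MB (24-bit mask).  On x86_64 such an allocation
 * may fall back to the GFP_DMA zone, so the allocator keeps
 * __GFP_NORETRY for it; a 32-bit-capable device may now retry instead of
 * failing early.
 */
static void *legacy_alloc_coherent(struct pci_dev *pdev, size_t size,
                                   dma_addr_t *handle)
{
        if (pci_set_consistent_dma_mask(pdev, DMA_24BIT_MASK))
                return NULL;    /* platform cannot satisfy the mask */

        return dma_alloc_coherent(&pdev->dev, size, handle, GFP_KERNEL);
}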
@@ -328,18 +328,18 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
 #endif
        {
                .callback = set_bf_sort,
-               .ident = "HP ProLiant DL385 G2",
+               .ident = "HP ProLiant DL360",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
                },
        },
        {
                .callback = set_bf_sort,
-               .ident = "HP ProLiant DL585 G2",
+               .ident = "HP ProLiant DL380",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
                },
        },
        {}
@@ -181,7 +181,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
           any need to change it. */
        struct mempolicy *oldpol;
        cpumask_t oldmask = current->cpus_allowed;
-       int node = pcibus_to_node(dev->bus);
+       int node = dev_to_node(&dev->dev);

        if (node >= 0) {
                node_to_cpumask_ptr(nodecpumask, node);
@@ -489,13 +489,14 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
  * @kobj: kobject for mapping
  * @attr: struct bin_attribute for the file being mapped
  * @vma: struct vm_area_struct passed into the mmap
+ * @write_combine: 1 for write_combine mapping
  *
  * Use the regular PCI mapping routines to map a PCI resource into userspace.
  * FIXME: write combining? maybe automatic for prefetchable regions?
  */
 static int
 pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
-                  struct vm_area_struct *vma)
+                  struct vm_area_struct *vma, int write_combine)
 {
        struct pci_dev *pdev = to_pci_dev(container_of(kobj,
                                          struct device, kobj));
@@ -518,7 +519,21 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
        vma->vm_pgoff += start >> PAGE_SHIFT;
        mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

-       return pci_mmap_page_range(pdev, vma, mmap_type, 0);
+       return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
+}
+
+static int
+pci_mmap_resource_uc(struct kobject *kobj, struct bin_attribute *attr,
+                     struct vm_area_struct *vma)
+{
+       return pci_mmap_resource(kobj, attr, vma, 0);
+}
+
+static int
+pci_mmap_resource_wc(struct kobject *kobj, struct bin_attribute *attr,
+                     struct vm_area_struct *vma)
+{
+       return pci_mmap_resource(kobj, attr, vma, 1);
 }

 /**
@@ -541,9 +556,46 @@ pci_remove_resource_files(struct pci_dev *pdev)
                        sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
                        kfree(res_attr);
                }
+
+               res_attr = pdev->res_attr_wc[i];
+               if (res_attr) {
+                       sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
+                       kfree(res_attr);
+               }
        }
 }

+static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
+{
+       /* allocate attribute structure, piggyback attribute name */
+       int name_len = write_combine ? 13 : 10;
+       struct bin_attribute *res_attr;
+       int retval;
+
+       res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
+       if (res_attr) {
+               char *res_attr_name = (char *)(res_attr + 1);
+
+               if (write_combine) {
+                       pdev->res_attr_wc[num] = res_attr;
+                       sprintf(res_attr_name, "resource%d_wc", num);
+                       res_attr->mmap = pci_mmap_resource_wc;
+               } else {
+                       pdev->res_attr[num] = res_attr;
+                       sprintf(res_attr_name, "resource%d", num);
+                       res_attr->mmap = pci_mmap_resource_uc;
+               }
+               res_attr->attr.name = res_attr_name;
+               res_attr->attr.mode = S_IRUSR | S_IWUSR;
+               res_attr->size = pci_resource_len(pdev, num);
+               res_attr->private = &pdev->resource[num];
+               retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
+       } else
+               retval = -ENOMEM;
+
+       return retval;
+}
+
 /**
  * pci_create_resource_files - create resource files in sysfs for @dev
  * @dev: dev in question
@@ -557,32 +609,20 @@ static int pci_create_resource_files(struct pci_dev *pdev)

        /* Expose the PCI resources from this device as files */
        for (i = 0; i < PCI_ROM_RESOURCE; i++) {
-               struct bin_attribute *res_attr;

                /* skip empty resources */
                if (!pci_resource_len(pdev, i))
                        continue;

-               /* allocate attribute structure, piggyback attribute name */
-               res_attr = kzalloc(sizeof(*res_attr) + 10, GFP_ATOMIC);
-               if (res_attr) {
-                       char *res_attr_name = (char *)(res_attr + 1);
-
-                       pdev->res_attr[i] = res_attr;
-                       sprintf(res_attr_name, "resource%d", i);
-                       res_attr->attr.name = res_attr_name;
-                       res_attr->attr.mode = S_IRUSR | S_IWUSR;
-                       res_attr->size = pci_resource_len(pdev, i);
-                       res_attr->mmap = pci_mmap_resource;
-                       res_attr->private = &pdev->resource[i];
-                       retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
+               retval = pci_create_attr(pdev, i, 0);
+               /* for prefetchable resources, create a WC mappable file */
+               if (!retval && pdev->resource[i].flags & IORESOURCE_PREFETCH)
+                       retval = pci_create_attr(pdev, i, 1);
+
                if (retval) {
                        pci_remove_resource_files(pdev);
                        return retval;
                }
-               } else {
-                       return -ENOMEM;
-               }
        }
        return 0;
 }
@@ -206,6 +206,7 @@ struct pci_dev {
        struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
        int rom_attr_enabled;           /* has display of the rom attribute been enabled? */
        struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
+       struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
 #ifdef CONFIG_PCI_MSI
        struct list_head msi_list;
 #endif