Commit fb7d1bcf authored by Linus Torvalds

Merge tag 'pci-v4.18-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci

Pull PCI fixes from Bjorn Helgaas:

 - Fix I/O space page leaks that prevented the V3 Semiconductor,
   MediaTek, Faraday, Aardvark, DesignWare, Versatile, and X-Gene host
   controller drivers from being unbound and rebound (Sergei Shtylyov)

 - Fix a NULL pointer dereference in the endpoint library configfs
   support (Kishon Vijay Abraham I)

 - Fix a race condition in Hyper-V IRQ handling (Dexuan Cui)

* tag 'pci-v4.18-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci:
  PCI: v3-semi: Fix I/O space page leak
  PCI: mediatek: Fix I/O space page leak
  PCI: faraday: Fix I/O space page leak
  PCI: aardvark: Fix I/O space page leak
  PCI: designware: Fix I/O space page leak
  PCI: versatile: Fix I/O space page leak
  PCI: xgene: Fix I/O space page leak
  PCI: OF: Fix I/O space page leak
  PCI: endpoint: Fix NULL pointer dereference error when CONFIGFS is disabled
  PCI: hv: Disable/enable IRQs rather than BH in hv_compose_msi_msg()
parents f39f28ff 270ed733
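
As background for the driver hunks below, here is a minimal sketch of the managed mapping pattern they all switch to; the "foo_pcie" helper is hypothetical and not part of this series, it only illustrates how a host controller driver would call the new devm_pci_remap_iospace() so the I/O window is unmapped automatically on unbind and a later re-bind can map it again.

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/pci.h>

static int foo_pcie_map_io(struct device *dev, struct resource *io,
			   phys_addr_t io_base)
{
	int ret;

	/*
	 * A bare pci_remap_iospace() call with no pci_unmap_iospace() on
	 * unbind leaves the I/O page range mapped forever, so a second
	 * probe of the driver fails.  The managed variant registers the
	 * unmap as a devres action tied to @dev.
	 */
	ret = devm_pci_remap_iospace(dev, io, io_base);
	if (ret)
		dev_warn(dev, "error %d: failed to map resource %pR\n",
			 ret, io);
	return ret;
}
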
@@ -363,7 +363,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
 		switch (resource_type(win->res)) {
 		case IORESOURCE_IO:
-			ret = pci_remap_iospace(win->res, pp->io_base);
+			ret = devm_pci_remap_iospace(dev, win->res,
+						     pp->io_base);
 			if (ret) {
 				dev_warn(dev, "Error %d: failed to map resource %pR\n",
 					 ret, win->res);
@@ -849,7 +849,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
 					     0, 0xF8000000, 0,
 					     lower_32_bits(res->start),
 					     OB_PCIE_IO);
-			err = pci_remap_iospace(res, iobase);
+			err = devm_pci_remap_iospace(dev, res, iobase);
 			if (err) {
 				dev_warn(dev, "error %d: failed to map resource %pR\n",
 					 err, res);
@@ -503,7 +503,7 @@ static int faraday_pci_probe(struct platform_device *pdev)
 			dev_err(dev, "illegal IO mem size\n");
 			return -EINVAL;
 		}
-		ret = pci_remap_iospace(io, io_base);
+		ret = devm_pci_remap_iospace(dev, io, io_base);
 		if (ret) {
 			dev_warn(dev, "error %d: failed to map resource %pR\n",
 				 ret, io);
@@ -1073,6 +1073,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
 	struct cpumask *dest;
+	unsigned long flags;
 	struct compose_comp_ctxt comp;
 	struct tran_int_desc *int_desc;
 	struct {
@@ -1164,14 +1165,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 		 * the channel callback directly when channel->target_cpu is
 		 * the current CPU. When the higher level interrupt code
 		 * calls us with interrupt enabled, let's add the
-		 * local_bh_disable()/enable() to avoid race.
+		 * local_irq_save()/restore() to avoid race:
+		 * hv_pci_onchannelcallback() can also run in tasklet.
 		 */
-		local_bh_disable();
+		local_irq_save(flags);

 		if (hbus->hdev->channel->target_cpu == smp_processor_id())
 			hv_pci_onchannelcallback(hbus);

-		local_bh_enable();
+		local_irq_restore(flags);

 		if (hpdev->state == hv_pcichild_ejecting) {
 			dev_err_once(&hbus->hdev->device,
@@ -537,7 +537,7 @@ static int v3_pci_setup_resource(struct v3_pci *v3,
 		v3->io_bus_addr = io->start - win->offset;
 		dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
 			io, &v3->io_bus_addr);
-		ret = pci_remap_iospace(io, io_base);
+		ret = devm_pci_remap_iospace(dev, io, io_base);
 		if (ret) {
 			dev_warn(dev,
 				 "error %d: failed to map resource %pR\n",
@@ -82,7 +82,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
 		switch (resource_type(res)) {
 		case IORESOURCE_IO:
-			err = pci_remap_iospace(res, iobase);
+			err = devm_pci_remap_iospace(dev, res, iobase);
 			if (err) {
 				dev_warn(dev, "error %d: failed to map resource %pR\n",
 					 err, res);
@@ -423,7 +423,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
 		case IORESOURCE_IO:
 			xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
 						res->start - window->offset);
-			ret = pci_remap_iospace(res, io_base);
+			ret = devm_pci_remap_iospace(dev, res, io_base);
 			if (ret < 0)
 				return ret;
 			break;
@@ -1109,7 +1109,7 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
 	if (err < 0)
 		return err;

-	pci_remap_iospace(&pcie->pio, pcie->io.start);
+	devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);

 	return 0;
 }
@@ -137,6 +137,20 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
 }
 EXPORT_SYMBOL_GPL(pci_epf_alloc_space);

+static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
+{
+	struct config_group *group, *tmp;
+
+	if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+		return;
+
+	mutex_lock(&pci_epf_mutex);
+	list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
+		pci_ep_cfs_remove_epf_group(group);
+	list_del(&driver->epf_group);
+	mutex_unlock(&pci_epf_mutex);
+}
+
 /**
  * pci_epf_unregister_driver() - unregister the PCI EPF driver
  * @driver: the PCI EPF driver that has to be unregistered
@@ -145,16 +159,37 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
  */
 void pci_epf_unregister_driver(struct pci_epf_driver *driver)
 {
-	struct config_group *group, *tmp;
-
-	mutex_lock(&pci_epf_mutex);
-	list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
-		pci_ep_cfs_remove_epf_group(group);
-	list_del(&driver->epf_group);
-	mutex_unlock(&pci_epf_mutex);
+	pci_epf_remove_cfs(driver);
 	driver_unregister(&driver->driver);
 }
 EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);

+static int pci_epf_add_cfs(struct pci_epf_driver *driver)
+{
+	struct config_group *group;
+	const struct pci_epf_device_id *id;
+
+	if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+		return 0;
+
+	INIT_LIST_HEAD(&driver->epf_group);
+
+	id = driver->id_table;
+	while (id->name[0]) {
+		group = pci_ep_cfs_add_epf_group(id->name);
+		if (IS_ERR(group)) {
+			pci_epf_remove_cfs(driver);
+			return PTR_ERR(group);
+		}
+
+		mutex_lock(&pci_epf_mutex);
+		list_add_tail(&group->group_entry, &driver->epf_group);
+		mutex_unlock(&pci_epf_mutex);
+		id++;
+	}
+
+	return 0;
+}
+
 /**
  * __pci_epf_register_driver() - register a new PCI EPF driver
@@ -167,8 +202,6 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
 			      struct module *owner)
 {
 	int ret;
-	struct config_group *group;
-	const struct pci_epf_device_id *id;

 	if (!driver->ops)
 		return -EINVAL;
@@ -183,16 +216,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
 	if (ret)
 		return ret;

-	INIT_LIST_HEAD(&driver->epf_group);
-
-	id = driver->id_table;
-	while (id->name[0]) {
-		group = pci_ep_cfs_add_epf_group(id->name);
-		mutex_lock(&pci_epf_mutex);
-		list_add_tail(&group->group_entry, &driver->epf_group);
-		mutex_unlock(&pci_epf_mutex);
-		id++;
-	}
+	pci_epf_add_cfs(driver);

 	return 0;
 }
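
The configfs groups created by pci_epf_add_cfs() are driven by each endpoint function driver's id_table; when CONFIG_PCI_ENDPOINT_CONFIGFS is disabled the pci_ep_cfs_*() helpers are stubs, which is why the IS_ENABLED() guards above bail out before touching the returned group. As background, here is a minimal sketch of a hypothetical EPF driver registration, loosely modeled on the in-tree pci-epf-test driver; the "foo_epf" names and trivial ops are made up for illustration.

#include <linux/module.h>
#include <linux/pci-epf.h>

static int foo_epf_probe(struct pci_epf *epf)
{
	return 0;			/* set up per-function state */
}

static int foo_epf_bind(struct pci_epf *epf)
{
	return 0;			/* claim BARs, map EPC space */
}

static void foo_epf_unbind(struct pci_epf *epf)
{
}

static void foo_epf_linkup(struct pci_epf *epf)
{
}

static struct pci_epf_ops foo_epf_ops = {
	.bind	= foo_epf_bind,
	.unbind	= foo_epf_unbind,
	.linkup	= foo_epf_linkup,
};

/* each name listed here becomes a configfs group when configfs is enabled */
static const struct pci_epf_device_id foo_epf_ids[] = {
	{ .name = "foo_epf" },
	{ },
};

static struct pci_epf_driver foo_epf_driver = {
	.driver.name	= "foo_epf",
	.probe		= foo_epf_probe,
	.id_table	= foo_epf_ids,
	.ops		= &foo_epf_ops,
	.owner		= THIS_MODULE,
};

static int __init foo_epf_init(void)
{
	return pci_epf_register_driver(&foo_epf_driver);
}
module_init(foo_epf_init);

static void __exit foo_epf_exit(void)
{
	pci_epf_unregister_driver(&foo_epf_driver);
}
module_exit(foo_epf_exit);

MODULE_LICENSE("GPL v2");
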
@@ -612,7 +612,7 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
 		switch (resource_type(res)) {
 		case IORESOURCE_IO:
-			err = pci_remap_iospace(res, iobase);
+			err = devm_pci_remap_iospace(dev, res, iobase);
 			if (err) {
 				dev_warn(dev, "error %d: failed to map resource %pR\n",
 					 err, res);
@@ -3579,6 +3579,44 @@ void pci_unmap_iospace(struct resource *res)
 }
 EXPORT_SYMBOL(pci_unmap_iospace);

+static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
+{
+	struct resource **res = ptr;
+
+	pci_unmap_iospace(*res);
+}
+
+/**
+ * devm_pci_remap_iospace - Managed pci_remap_iospace()
+ * @dev: Generic device to remap IO address for
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Managed pci_remap_iospace().  Map is automatically unmapped on driver
+ * detach.
+ */
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+			   phys_addr_t phys_addr)
+{
+	const struct resource **ptr;
+	int error;
+
+	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	error = pci_remap_iospace(res, phys_addr);
+	if (error) {
+		devres_free(ptr);
+	} else {
+		*ptr = res;
+		devres_add(dev, ptr);
+	}
+
+	return error;
+}
+EXPORT_SYMBOL(devm_pci_remap_iospace);
+
 /**
  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
  * @dev: Generic device to remap IO address for
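
The new helper follows the standard open-coded devres pattern: allocate a release record with devres_alloc(), perform the operation, then either attach the record with devres_add() on success or discard it with devres_free() on failure, so the release callback runs automatically when the device is detached. A minimal sketch of the same shape for a made-up resource type (the foo_clock names are purely illustrative):

#include <linux/device.h>
#include <linux/gfp.h>

struct foo_clock;				/* hypothetical resource */
int foo_clock_enable(struct foo_clock *clk);
void foo_clock_disable(struct foo_clock *clk);

/* devres release callback: runs when the owning device is detached */
static void devm_foo_clock_disable(struct device *dev, void *ptr)
{
	struct foo_clock **clk = ptr;

	foo_clock_disable(*clk);
}

int devm_foo_clock_enable(struct device *dev, struct foo_clock *clk)
{
	struct foo_clock **ptr;
	int error;

	ptr = devres_alloc(devm_foo_clock_disable, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = foo_clock_enable(clk);
	if (error) {
		devres_free(ptr);	/* nothing to undo on failure */
	} else {
		*ptr = clk;
		devres_add(dev, ptr);	/* undone automatically on unbind */
	}

	return error;
}
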
@@ -1240,6 +1240,8 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
 unsigned long pci_address_to_pio(phys_addr_t addr);
 phys_addr_t pci_pio_to_address(unsigned long pio);
 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+			   phys_addr_t phys_addr);
 void pci_unmap_iospace(struct resource *res);
 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
 				      resource_size_t offset,