Commit 204ed652 authored by Tomasz Nowicki, committed by Khalid Elmously

PCI, of: Move PCI I/O space management to PCI core code

BugLink: https://bugs.launchpad.net/bugs/1797092

No functional changes in this patch.

PCI I/O space mapping code does not depend on OF; therefore it can be moved
to PCI core code.  This way we will be able to use it, e.g., in ACPI PCI
code.
Suggested-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Tomasz Nowicki <tn@semihalf.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
CC: Arnd Bergmann <arnd@arndb.de>
CC: Liviu Dudau <Liviu.Dudau@arm.com>
(cherry picked from commit c5076cfe)
Signed-off-by: dann frazier <dann.frazier@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
parent 8520eb36
drivers/of/address.c
@@ -4,6 +4,7 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -673,121 +674,6 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
}
EXPORT_SYMBOL(of_get_address);
#ifdef PCI_IOBASE
struct io_range {
struct list_head list;
phys_addr_t start;
resource_size_t size;
};
static LIST_HEAD(io_range_list);
static DEFINE_SPINLOCK(io_range_lock);
#endif
/*
* Record the PCI IO range (expressed as CPU physical address + size).
* Return a negative value if an error has occured, zero otherwise
*/
int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
{
int err = 0;
#ifdef PCI_IOBASE
struct io_range *range;
resource_size_t allocated_size = 0;
/* check if the range hasn't been previously recorded */
spin_lock(&io_range_lock);
list_for_each_entry(range, &io_range_list, list) {
if (addr >= range->start && addr + size <= range->start + size) {
/* range already registered, bail out */
goto end_register;
}
allocated_size += range->size;
}
/* range not registed yet, check for available space */
if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
/* if it's too big check if 64K space can be reserved */
if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
err = -E2BIG;
goto end_register;
}
size = SZ_64K;
pr_warn("Requested IO range too big, new size set to 64K\n");
}
/* add the range to the list */
range = kzalloc(sizeof(*range), GFP_ATOMIC);
if (!range) {
err = -ENOMEM;
goto end_register;
}
range->start = addr;
range->size = size;
list_add_tail(&range->list, &io_range_list);
end_register:
spin_unlock(&io_range_lock);
#endif
return err;
}
phys_addr_t pci_pio_to_address(unsigned long pio)
{
phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
#ifdef PCI_IOBASE
struct io_range *range;
resource_size_t allocated_size = 0;
if (pio > IO_SPACE_LIMIT)
return address;
spin_lock(&io_range_lock);
list_for_each_entry(range, &io_range_list, list) {
if (pio >= allocated_size && pio < allocated_size + range->size) {
address = range->start + pio - allocated_size;
break;
}
allocated_size += range->size;
}
spin_unlock(&io_range_lock);
#endif
return address;
}
unsigned long __weak pci_address_to_pio(phys_addr_t address)
{
#ifdef PCI_IOBASE
struct io_range *res;
resource_size_t offset = 0;
unsigned long addr = -1;
spin_lock(&io_range_lock);
list_for_each_entry(res, &io_range_list, list) {
if (address >= res->start && address < res->start + res->size) {
addr = address - res->start + offset;
break;
}
offset += res->size;
}
spin_unlock(&io_range_lock);
return addr;
#else
if (address > IO_SPACE_LIMIT)
return (unsigned long)-1;
return (unsigned long) address;
#endif
}
static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r)
...
drivers/pci/pci.c
@@ -3034,6 +3034,121 @@ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
#ifdef PCI_IOBASE
struct io_range {
struct list_head list;
phys_addr_t start;
resource_size_t size;
};
static LIST_HEAD(io_range_list);
static DEFINE_SPINLOCK(io_range_lock);
#endif
/*
* Record the PCI IO range (expressed as CPU physical address + size).
* Return a negative value if an error has occured, zero otherwise
*/
int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
{
int err = 0;
#ifdef PCI_IOBASE
struct io_range *range;
resource_size_t allocated_size = 0;
/* check if the range hasn't been previously recorded */
spin_lock(&io_range_lock);
list_for_each_entry(range, &io_range_list, list) {
if (addr >= range->start && addr + size <= range->start + size) {
/* range already registered, bail out */
goto end_register;
}
allocated_size += range->size;
}
/* range not registed yet, check for available space */
if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
/* if it's too big check if 64K space can be reserved */
if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
err = -E2BIG;
goto end_register;
}
size = SZ_64K;
pr_warn("Requested IO range too big, new size set to 64K\n");
}
/* add the range to the list */
range = kzalloc(sizeof(*range), GFP_ATOMIC);
if (!range) {
err = -ENOMEM;
goto end_register;
}
range->start = addr;
range->size = size;
list_add_tail(&range->list, &io_range_list);
end_register:
spin_unlock(&io_range_lock);
#endif
return err;
}
phys_addr_t pci_pio_to_address(unsigned long pio)
{
phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
#ifdef PCI_IOBASE
struct io_range *range;
resource_size_t allocated_size = 0;
if (pio > IO_SPACE_LIMIT)
return address;
spin_lock(&io_range_lock);
list_for_each_entry(range, &io_range_list, list) {
if (pio >= allocated_size && pio < allocated_size + range->size) {
address = range->start + pio - allocated_size;
break;
}
allocated_size += range->size;
}
spin_unlock(&io_range_lock);
#endif
return address;
}
unsigned long __weak pci_address_to_pio(phys_addr_t address)
{
#ifdef PCI_IOBASE
struct io_range *res;
resource_size_t offset = 0;
unsigned long addr = -1;
spin_lock(&io_range_lock);
list_for_each_entry(res, &io_range_list, list) {
if (address >= res->start && address < res->start + res->size) {
addr = address - res->start + offset;
break;
}
offset += res->size;
}
spin_unlock(&io_range_lock);
return addr;
#else
if (address > IO_SPACE_LIMIT)
return (unsigned long)-1;
return (unsigned long) address;
#endif
}
/**
 * pci_remap_iospace - Remap the memory mapped I/O space
 * @res: Resource describing the I/O space
...
include/linux/of_address.h
@@ -44,10 +44,6 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
extern const __be32 *of_get_address(struct device_node *dev, int index,
		u64 *size, unsigned int *flags);
extern int pci_register_io_range(phys_addr_t addr, resource_size_t size);
extern unsigned long pci_address_to_pio(phys_addr_t addr);
extern phys_addr_t pci_pio_to_address(unsigned long pio);
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
		struct device_node *node);
extern struct of_pci_range *of_pci_range_parser_one(
@@ -78,11 +74,6 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index,
return NULL;
}
static inline phys_addr_t pci_pio_to_address(unsigned long pio)
{
return 0;
}
static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
		struct device_node *node)
{
...
include/linux/pci.h
@@ -1152,6 +1152,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
void *alignf_data);
int pci_register_io_range(phys_addr_t addr, resource_size_t size);
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
@@ -1474,6 +1477,8 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
static inline void pci_block_cfg_access(struct pci_dev *dev) { }
static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
{ return 0; }
...
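
Note (not part of the commit): a minimal sketch of how a host bridge driver might consume these helpers once they live in the PCI core. The function name, the caller, and the error handling below are hypothetical; only pci_register_io_range(), pci_address_to_pio(), pci_remap_iospace() and resource_size() come from the code above or from the existing kernel API.

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/pci.h>

/* Hypothetical helper: register and map one I/O window for a host bridge. */
static int example_host_map_io(struct resource *io_res, phys_addr_t cpu_addr)
{
	unsigned long pio;
	int ret;

	/* Record the CPU physical window backing this I/O space. */
	ret = pci_register_io_range(cpu_addr, resource_size(io_res));
	if (ret)
		return ret;

	/* Translate the CPU address into a logical port number. */
	pio = pci_address_to_pio(cpu_addr);
	if (pio == (unsigned long)-1)
		return -EINVAL;

	/* Rebase the resource so it is expressed in port numbers. */
	io_res->end = pio + resource_size(io_res) - 1;
	io_res->start = pio;

	/* Map the window at PCI_IOBASE + pio so inb()/outb() reach it. */
	return pci_remap_iospace(io_res, cpu_addr);
}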