Commit 2cb7b5a3 authored by Linus Torvalds

Merge tag 'irqdomain-for-linus' of git://git.secretlab.ca/git/linux

Pull irqdomain refactoring from Grant Likely:
 "This is the long awaited simplification of irqdomain.  It gets rid of
  the different types of irq domains and instead both linear and tree
  mappings can be supported in a single domain.  Doing this removes a
  lot of special case code and makes irq domains simpler to understand
  overall"

* tag 'irqdomain-for-linus' of git://git.secretlab.ca/git/linux:
  irq: fix checkpatch error
  irqdomain: Include hwirq number in /proc/interrupts
  irqdomain: make irq_linear_revmap() a fast path again
  irqdomain: remove irq_domain_generate_simple()
  irqdomain: Refactor irq_domain_associate_many()
  irqdomain: Beef up debugfs output
  irqdomain: Clean up aftermath of irq_domain refactoring
  irqdomain: Eliminate revmap type
  irqdomain: merge linear and tree reverse mappings.
  irqdomain: Add a name field
  irqdomain: Replace LEGACY mapping with LINEAR
  irqdomain: Relax failure path on setting up mappings
parents b2c31107 798f0fd1
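
Grant's description boils down to one data-structure change: every domain now carries both a linear table and a radix tree, and a lookup picks one by hwirq range. A minimal sketch of that combined reverse map, modelled on the new irq_find_mapping() further down in this diff (the function name revmap_lookup is illustrative only, and the real lookup also handles direct/nomap domains):

    /*
     * Simplified sketch of the merged reverse map: small hwirqs hit the
     * linear table appended to the domain, larger ones fall back to the
     * radix tree.
     */
    static unsigned int revmap_lookup(struct irq_domain *domain,
                                      irq_hw_number_t hwirq)
    {
            struct irq_data *data;

            if (hwirq < domain->revmap_size)        /* fast, array-indexed path */
                    return domain->linear_revmap[hwirq];

            rcu_read_lock();                        /* slow path: radix tree */
            data = radix_tree_lookup(&domain->revmap_tree, hwirq);
            rcu_read_unlock();
            return data ? data->irq : 0;
    }
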
......@@ -239,7 +239,7 @@ void __init beatic_init_IRQ(void)
ppc_md.get_irq = beatic_get_irq;
/* Allocate an irq host */
beatic_host = irq_domain_add_nomap(NULL, 0, &beatic_pic_host_ops, NULL);
beatic_host = irq_domain_add_nomap(NULL, ~0, &beatic_pic_host_ops, NULL);
BUG_ON(beatic_host == NULL);
irq_set_default_host(beatic_host);
}
......
......@@ -192,7 +192,7 @@ static int psurge_secondary_ipi_init(void)
{
int rc = -ENOMEM;
psurge_host = irq_domain_add_nomap(NULL, 0, &psurge_host_ops, NULL);
psurge_host = irq_domain_add_nomap(NULL, ~0, &psurge_host_ops, NULL);
if (psurge_host)
psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
......
......@@ -364,9 +364,7 @@ static void dt_add_ioapic_domain(unsigned int ioapic_num,
* and assigned so we can keep the 1:1 mapping which the ioapic
* is having.
*/
ret = irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY);
if (ret)
pr_err("Error mapping legacy IRQs: %d\n", ret);
irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY);
if (num > NR_IRQS_LEGACY) {
ret = irq_create_strict_mappings(id, NR_IRQS_LEGACY,
......
......@@ -73,57 +73,48 @@ struct irq_domain_chip_generic;
/**
* struct irq_domain - Hardware interrupt number translation object
* @link: Element in global irq_domain list.
* @revmap_type: Method used for reverse mapping hwirq numbers to linux irq. This
* will be one of the IRQ_DOMAIN_MAP_* values.
* @revmap_data: Revmap method specific data.
* @name: Name of interrupt domain
* @ops: pointer to irq_domain methods
* @host_data: private data pointer for use by owner. Not touched by irq_domain
* core code.
* @irq_base: Start of irq_desc range assigned to the irq_domain. The creator
* of the irq_domain is responsible for allocating the array of
* irq_desc structures.
* @nr_irq: Number of irqs managed by the irq domain
* @hwirq_base: Starting number for hwirqs managed by the irq domain
* @of_node: (optional) Pointer to device tree nodes associated with the
* irq_domain. Used when decoding device tree interrupt specifiers.
*
* Optional elements
* @of_node: Pointer to device tree nodes associated with the irq_domain. Used
* when decoding device tree interrupt specifiers.
* @gc: Pointer to a list of generic chips. There is a helper function for
* setting up one or more generic chips for interrupt controllers
* drivers using the generic chip library which uses this pointer.
*
* Revmap data, used internally by irq_domain
* @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
* support direct mapping
* @revmap_size: Size of the linear map table @linear_revmap[]
* @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
* @linear_revmap: Linear table of hwirq->virq reverse mappings
*/
struct irq_domain {
struct list_head link;
/* type of reverse mapping_technique */
unsigned int revmap_type;
union {
struct {
unsigned int size;
unsigned int first_irq;
irq_hw_number_t first_hwirq;
} legacy;
struct {
unsigned int size;
unsigned int *revmap;
} linear;
struct {
unsigned int max_irq;
} nomap;
struct radix_tree_root tree;
} revmap_data;
const char *name;
const struct irq_domain_ops *ops;
void *host_data;
irq_hw_number_t inval_irq;
/* Optional device node pointer */
/* Optional data */
struct device_node *of_node;
/* Optional pointer to generic interrupt chips */
struct irq_domain_chip_generic *gc;
};
#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
* ie. legacy 8259, gets irqs 1..15 */
#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
/* reverse map data. The linear map gets appended to the irq_domain */
irq_hw_number_t hwirq_max;
unsigned int revmap_direct_max_irq;
unsigned int revmap_size;
struct radix_tree_root revmap_tree;
unsigned int linear_revmap[];
};
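
One detail of the new layout worth calling out: linear_revmap[] is a flexible array member, so the linear table is carved out of the same allocation as the domain itself. A rough sketch of the sizing, mirroring __irq_domain_add() later in this diff:

    /* One allocation covers the domain and its linear hwirq->virq table. */
    domain = kzalloc_node(sizeof(*domain) + size * sizeof(unsigned int),
                          GFP_KERNEL, of_node_to_nid(of_node));
    /* hwirq <  revmap_size : looked up in domain->linear_revmap[hwirq]   */
    /* hwirq >= revmap_size : looked up in domain->revmap_tree (radix)    */
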
#ifdef CONFIG_IRQ_DOMAIN
struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data);
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
unsigned int size,
unsigned int first_irq,
......@@ -135,21 +126,30 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data);
struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
extern struct irq_domain *irq_find_host(struct device_node *node);
extern void irq_set_default_host(struct irq_domain *host);
/**
* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
* @size: Number of interrupts in the domain.
* @ops: map/unmap domain callbacks
* @host_data: Controller private data pointer
*/
static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
unsigned int size,
const struct irq_domain_ops *ops,
void *host_data);
struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
void *host_data)
{
return __irq_domain_add(of_node, size, size, 0, ops, host_data);
}
static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
unsigned int max_irq,
const struct irq_domain_ops *ops,
void *host_data);
struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data);
extern struct irq_domain *irq_find_host(struct device_node *node);
extern void irq_set_default_host(struct irq_domain *host);
void *host_data)
{
return __irq_domain_add(of_node, 0, max_irq, max_irq, ops, host_data);
}
static inline struct irq_domain *irq_domain_add_legacy_isa(
struct device_node *of_node,
const struct irq_domain_ops *ops,
......@@ -158,21 +158,40 @@ static inline struct irq_domain *irq_domain_add_legacy_isa(
return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
host_data);
}
static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data)
{
return __irq_domain_add(of_node, 0, ~0, 0, ops, host_data);
}
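
For orientation, a hedged sketch of how a driver would typically use the constructors above in its init path; the "foo" identifiers are invented for this example, only the irqdomain and genirq calls are real:

    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    static int foo_irq_map(struct irq_domain *d, unsigned int virq,
                           irq_hw_number_t hwirq)
    {
            /* give every mapped hwirq a simple flow handler */
            irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
            return 0;
    }

    static const struct irq_domain_ops foo_irq_ops = {
            .map   = foo_irq_map,
            .xlate = irq_domain_xlate_onecell,
    };

    static int foo_probe(struct device_node *np)
    {
            /*
             * Bounded hwirq range: _add_linear(); huge or sparse range:
             * _add_tree(); controller that picks its own numbers: _add_nomap().
             */
            struct irq_domain *d = irq_domain_add_linear(np, 32, &foo_irq_ops, NULL);

            if (!d)
                    return -ENOMEM;

            /* virqs are then created lazily, e.g. when a child asks for one */
            return irq_create_mapping(d, 5) ? 0 : -ENOMEM;
    }
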
extern void irq_domain_remove(struct irq_domain *host);
extern int irq_domain_associate_many(struct irq_domain *domain,
extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq);
extern void irq_domain_associate_many(struct irq_domain *domain,
unsigned int irq_base,
irq_hw_number_t hwirq_base, int count);
static inline int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
return irq_domain_associate_many(domain, irq, hwirq, 1);
}
extern unsigned int irq_create_mapping(struct irq_domain *host,
irq_hw_number_t hwirq);
extern void irq_dispose_mapping(unsigned int virq);
/**
* irq_linear_revmap() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
*
* This is a fast path alternative to irq_find_mapping() that can be
* called directly by irq controller code to save a handful of
* instructions. It is always safe to call, but won't find irqs mapped
* using the radix tree.
*/
static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0;
}
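
A hedged example of the intended caller: a chained handler in an interrupt controller driver, where saving a few instructions per interrupt matters. foo_read_pending() is an assumed hardware accessor, not a real API:

    static void foo_irq_demux(unsigned int irq, struct irq_desc *desc)
    {
            struct irq_domain *domain = irq_desc_get_handler_data(desc);
            unsigned long pending = foo_read_pending();     /* assumed helper */
            int hwirq;

            for_each_set_bit(hwirq, &pending, 32) {
                    unsigned int virq = irq_linear_revmap(domain, hwirq);

                    if (virq)       /* 0 means "not in the linear map" */
                            generic_handle_irq(virq);
            }
    }
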
extern unsigned int irq_find_mapping(struct irq_domain *host,
irq_hw_number_t hwirq);
extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
......@@ -186,9 +205,6 @@ static inline int irq_create_identity_mapping(struct irq_domain *host,
return irq_create_strict_mappings(host, hwirq, hwirq, 1);
}
extern unsigned int irq_linear_revmap(struct irq_domain *host,
irq_hw_number_t hwirq);
extern const struct irq_domain_ops irq_domain_simple_ops;
/* stock xlate functions */
......@@ -202,14 +218,6 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type);
#if defined(CONFIG_OF_IRQ)
extern void irq_domain_generate_simple(const struct of_device_id *match,
u64 phys_base, unsigned int irq_start);
#else /* CONFIG_OF_IRQ */
static inline void irq_domain_generate_simple(const struct of_device_id *match,
u64 phys_base, unsigned int irq_start) { }
#endif /* !CONFIG_OF_IRQ */
#else /* CONFIG_IRQ_DOMAIN */
static inline void irq_dispose_mapping(unsigned int virq) { }
#endif /* !CONFIG_IRQ_DOMAIN */
......
......@@ -275,10 +275,7 @@ int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
if (d->gc)
return -EBUSY;
if (d->revmap_type != IRQ_DOMAIN_MAP_LINEAR)
return -EINVAL;
numchips = d->revmap_data.linear.size / irqs_per_chip;
numchips = d->revmap_size / irqs_per_chip;
if (!numchips)
return -EINVAL;
......@@ -310,6 +307,7 @@ int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
/* Calc pointer to the next generic chip */
tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
}
d->name = name;
return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_domain_generic_chips);
......
......@@ -23,9 +23,11 @@ static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;
/**
* irq_domain_alloc() - Allocate a new irq_domain data structure
* __irq_domain_add() - Allocate a new irq_domain data structure
* @of_node: optional device-tree node of the interrupt controller
* @revmap_type: type of reverse mapping to use
* @size: Size of linear map; 0 for radix mapping only
* @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
* direct mapping
* @ops: map/unmap domain callbacks
* @host_data: Controller private data pointer
*
......@@ -33,41 +35,35 @@ static struct irq_domain *irq_default_domain;
* register allocated irq_domain with irq_domain_register(). Returns pointer
* to IRQ domain, or NULL on failure.
*/
static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
unsigned int revmap_type,
struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *domain;
domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
of_node_to_nid(of_node));
domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
GFP_KERNEL, of_node_to_nid(of_node));
if (WARN_ON(!domain))
return NULL;
/* Fill structure */
domain->revmap_type = revmap_type;
INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
domain->ops = ops;
domain->host_data = host_data;
domain->of_node = of_node_get(of_node);
domain->hwirq_max = hwirq_max;
domain->revmap_size = size;
domain->revmap_direct_max_irq = direct_max;
return domain;
}
static void irq_domain_free(struct irq_domain *domain)
{
of_node_put(domain->of_node);
kfree(domain);
}
static void irq_domain_add(struct irq_domain *domain)
{
mutex_lock(&irq_domain_mutex);
list_add(&domain->link, &irq_domain_list);
mutex_unlock(&irq_domain_mutex);
pr_debug("Allocated domain of type %d @0x%p\n",
domain->revmap_type, domain);
pr_debug("Added domain %s\n", domain->name);
return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
/**
* irq_domain_remove() - Remove an irq domain.
......@@ -81,29 +77,12 @@ void irq_domain_remove(struct irq_domain *domain)
{
mutex_lock(&irq_domain_mutex);
switch (domain->revmap_type) {
case IRQ_DOMAIN_MAP_LEGACY:
/*
* Legacy domains don't manage their own irq_desc
* allocations, we expect the caller to handle irq_desc
* freeing on their own.
*/
break;
case IRQ_DOMAIN_MAP_TREE:
/*
* radix_tree_delete() takes care of destroying the root
* node when all entries are removed. Shout if there are
* any mappings left.
*/
WARN_ON(domain->revmap_data.tree.height);
break;
case IRQ_DOMAIN_MAP_LINEAR:
kfree(domain->revmap_data.linear.revmap);
domain->revmap_data.linear.size = 0;
break;
case IRQ_DOMAIN_MAP_NOMAP:
break;
}
WARN_ON(domain->revmap_tree.height);
list_del(&domain->link);
......@@ -115,44 +94,30 @@ void irq_domain_remove(struct irq_domain *domain)
mutex_unlock(&irq_domain_mutex);
pr_debug("Removed domain of type %d @0x%p\n",
domain->revmap_type, domain);
pr_debug("Removed domain %s\n", domain->name);
irq_domain_free(domain);
of_node_put(domain->of_node);
kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
int size = domain->revmap_data.legacy.size;
if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
return 0;
return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
}
/**
* irq_domain_add_simple() - Allocate and register a simple irq_domain.
* irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
* @of_node: pointer to interrupt controller's device tree node.
* @size: total number of irqs in mapping
* @first_irq: first number of irq block assigned to the domain,
* pass zero to assign irqs on-the-fly. This will result in a
* linear IRQ domain so it is important to use irq_create_mapping()
* for each used IRQ, especially when SPARSE_IRQ is enabled.
* pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
* pre-map all of the irqs in the domain to virqs starting at first_irq.
* @ops: map/unmap domain callbacks
* @host_data: Controller private data pointer
*
* Allocates a legacy irq_domain if irq_base is positive or a linear
* domain otherwise. For the legacy domain, IRQ descriptors will also
* be allocated.
* Allocates an irq_domain, and optionally if first_irq is positive then also
* allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
*
* This is intended to implement the expected behaviour for most
* interrupt controllers which is that a linear mapping should
* normally be used unless the system requires a legacy mapping in
* order to support supplying interrupt numbers during non-DT
* registration of devices.
* interrupt controllers. If device tree is used, then first_irq will be 0 and
* irqs get mapped dynamically on the fly. However, if the controller requires
* static virq assignments (non-DT boot) then it will set that up correctly.
*/
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
unsigned int size,
......@@ -160,33 +125,25 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data)
{
if (first_irq > 0) {
int irq_base;
struct irq_domain *domain;
domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
if (!domain)
return NULL;
if (first_irq > 0) {
if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
/*
* Set the descriptor allocator to search for a
* 1-to-1 mapping, such as irq_alloc_desc_at().
* Use of_node_to_nid() which is defined to
* numa_node_id() on platforms that have no custom
* implementation.
*/
irq_base = irq_alloc_descs(first_irq, first_irq, size,
/* attempt to allocated irq_descs */
int rc = irq_alloc_descs(first_irq, first_irq, size,
of_node_to_nid(of_node));
if (irq_base < 0) {
if (rc < 0)
pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
first_irq);
irq_base = first_irq;
}
} else
irq_base = first_irq;
return irq_domain_add_legacy(of_node, size, irq_base, 0,
ops, host_data);
irq_domain_associate_many(domain, first_irq, 0, size);
}
/* A linear domain is the default */
return irq_domain_add_linear(of_node, size, ops, host_data);
return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
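
Concretely, the two behaviours described in the comment above come from the same call; FOO_IRQ_BASE and foo_irq_ops are placeholders in this sketch:

    /* DT boot: first_irq == 0, virqs are mapped on demand. */
    domain = irq_domain_add_simple(np, 16, 0, &foo_irq_ops, NULL);

    /* Non-DT board needing fixed numbers: pre-map 16 virqs at FOO_IRQ_BASE. */
    domain = irq_domain_add_simple(NULL, 16, FOO_IRQ_BASE, &foo_irq_ops, NULL);
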
......@@ -213,130 +170,18 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
void *host_data)
{
struct irq_domain *domain;
unsigned int i;
domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
domain = __irq_domain_add(of_node, first_hwirq + size,
first_hwirq + size, 0, ops, host_data);
if (!domain)
return NULL;
domain->revmap_data.legacy.first_irq = first_irq;
domain->revmap_data.legacy.first_hwirq = first_hwirq;
domain->revmap_data.legacy.size = size;
mutex_lock(&irq_domain_mutex);
/* Verify that all the irqs are available */
for (i = 0; i < size; i++) {
int irq = first_irq + i;
struct irq_data *irq_data = irq_get_irq_data(irq);
if (WARN_ON(!irq_data || irq_data->domain)) {
mutex_unlock(&irq_domain_mutex);
irq_domain_free(domain);
return NULL;
}
}
irq_domain_associate_many(domain, first_irq, first_hwirq, size);
/* Claim all of the irqs before registering a legacy domain */
for (i = 0; i < size; i++) {
struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
irq_data->hwirq = first_hwirq + i;
irq_data->domain = domain;
}
mutex_unlock(&irq_domain_mutex);
for (i = 0; i < size; i++) {
int irq = first_irq + i;
int hwirq = first_hwirq + i;
/* IRQ0 gets ignored */
if (!irq)
continue;
/* Legacy flags are left to default at this point,
* one can then use irq_create_mapping() to
* explicitly change them
*/
if (ops->map)
ops->map(domain, irq, hwirq);
/* Clear norequest flags */
irq_clear_status_flags(irq, IRQ_NOREQUEST);
}
irq_domain_add(domain);
return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
/**
* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
* @size: Number of interrupts in the domain.
* @ops: map/unmap domain callbacks
* @host_data: Controller private data pointer
*/
struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
unsigned int size,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *domain;
unsigned int *revmap;
revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
of_node_to_nid(of_node));
if (WARN_ON(!revmap))
return NULL;
domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
if (!domain) {
kfree(revmap);
return NULL;
}
domain->revmap_data.linear.size = size;
domain->revmap_data.linear.revmap = revmap;
irq_domain_add(domain);
return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_linear);
struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
unsigned int max_irq,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *domain = irq_domain_alloc(of_node,
IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
if (domain) {
domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
irq_domain_add(domain);
}
return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
/**
* irq_domain_add_tree()
* @of_node: pointer to interrupt controller's device tree node.
* @ops: map/unmap domain callbacks
*
* Note: The radix tree will be allocated later during boot automatically
* (the reverse mapping will use the slow path until that happens).
*/
struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *domain = irq_domain_alloc(of_node,
IRQ_DOMAIN_MAP_TREE, ops, host_data);
if (domain) {
INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
irq_domain_add(domain);
}
return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_tree);
/**
* irq_find_host() - Locates a domain for a given device node
* @node: device-tree node of the interrupt controller
......@@ -385,20 +230,14 @@ void irq_set_default_host(struct irq_domain *domain)
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
static void irq_domain_disassociate_many(struct irq_domain *domain,
unsigned int irq_base, int count)
static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
/*
* disassociate in reverse order;
* not strictly necessary, but nice for unwinding
*/
while (count--) {
int irq = irq_base + count;
struct irq_data *irq_data = irq_get_irq_data(irq);
irq_hw_number_t hwirq;
if (WARN_ON(!irq_data || irq_data->domain != domain))
continue;
if (WARN(!irq_data || irq_data->domain != domain,
"virq%i doesn't exist; cannot disassociate\n", irq))
return;
hwirq = irq_data->hwirq;
irq_set_status_flags(irq, IRQ_NOREQUEST);
......@@ -417,45 +256,31 @@ static void irq_domain_disassociate_many(struct irq_domain *domain,
irq_data->domain = NULL;
irq_data->hwirq = 0;
/* Clear reverse map */
switch(domain->revmap_type) {
case IRQ_DOMAIN_MAP_LINEAR:
if (hwirq < domain->revmap_data.linear.size)
domain->revmap_data.linear.revmap[hwirq] = 0;
break;
case IRQ_DOMAIN_MAP_TREE:
/* Clear reverse map for this hwirq */
if (hwirq < domain->revmap_size) {
domain->linear_revmap[hwirq] = 0;
} else {
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&domain->revmap_data.tree, hwirq);
radix_tree_delete(&domain->revmap_tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
break;
}
}
}
int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
irq_hw_number_t hwirq_base, int count)
int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq)
{
unsigned int virq = irq_base;
irq_hw_number_t hwirq = hwirq_base;
int i, ret;
pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
for (i = 0; i < count; i++) {
struct irq_data *irq_data = irq_get_irq_data(virq + i);
struct irq_data *irq_data = irq_get_irq_data(virq);
int ret;
if (WARN(!irq_data, "error: irq_desc not allocated; "
"irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
if (WARN(hwirq >= domain->hwirq_max,
"error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
return -EINVAL;
if (WARN(irq_data->domain, "error: irq_desc already associated; "
"irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
if (WARN(!irq_data, "error: virq%i is not allocated", virq))
return -EINVAL;
if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
return -EINVAL;
};
for (i = 0; i < count; i++, virq++, hwirq++) {
struct irq_data *irq_data = irq_get_irq_data(virq);
mutex_lock(&irq_domain_mutex);
irq_data->hwirq = hwirq;
irq_data->domain = domain;
if (domain->ops->map) {
......@@ -464,46 +289,49 @@ int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
/*
* If map() returns -EPERM, this interrupt is protected
* by the firmware or some other service and shall not
* be mapped.
*
* Since on some platforms we blindly try to map everything
* we end up with a log full of backtraces.
*
* So instead, we silently fail on -EPERM, it is the
* responsibility of the PIC driver to display a relevant
* message if needed.
* be mapped. Don't bother telling the user about it.
*/
if (ret != -EPERM) {
pr_err("irq-%i==>hwirq-0x%lx mapping failed: %d\n",
virq, hwirq, ret);
WARN_ON(1);
pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
domain->name, hwirq, virq, ret);
}
irq_data->domain = NULL;
irq_data->hwirq = 0;
goto err_unmap;
mutex_unlock(&irq_domain_mutex);
return ret;
}
/* If not already assigned, give the domain the chip's name */
if (!domain->name && irq_data->chip)
domain->name = irq_data->chip->name;
}
switch (domain->revmap_type) {
case IRQ_DOMAIN_MAP_LINEAR:
if (hwirq < domain->revmap_data.linear.size)
domain->revmap_data.linear.revmap[hwirq] = virq;
break;
case IRQ_DOMAIN_MAP_TREE:
if (hwirq < domain->revmap_size) {
domain->linear_revmap[hwirq] = virq;
} else {
mutex_lock(&revmap_trees_mutex);
radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
mutex_unlock(&revmap_trees_mutex);
break;
}
mutex_unlock(&irq_domain_mutex);
irq_clear_status_flags(virq, IRQ_NOREQUEST);
}
return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);
err_unmap:
irq_domain_disassociate_many(domain, irq_base, i);
return -EINVAL;
void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
irq_hw_number_t hwirq_base, int count)
{
int i;
pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
for (i = 0; i < count; i++) {
irq_domain_associate(domain, irq_base + i, hwirq_base + i);
}
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);
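
Since irq_domain_associate_many() no longer returns an error, callers simply fire and forget; the x86 ioapic hunk near the top of this diff is the pattern, roughly:

    /* Pre-map the legacy range 1:1; per-entry failures are only warned about. */
    irq_domain_associate_many(domain, 0, 0, NR_IRQS_LEGACY);
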
......@@ -513,7 +341,9 @@ EXPORT_SYMBOL_GPL(irq_domain_associate_many);
*
* This routine is used for irq controllers which can choose the hardware
* interrupt numbers they generate. In such a case it's simplest to use
* the linux irq as the hardware interrupt number.
* the linux irq as the hardware interrupt number. It still uses the linear
* or radix tree to store the mapping, but the irq controller can optimize
* the revmap path by using the hwirq directly.
*/
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
......@@ -522,17 +352,14 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
if (domain == NULL)
domain = irq_default_domain;
if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
return 0;
virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
if (!virq) {
pr_debug("create_direct virq allocation failed\n");
return 0;
}
if (virq >= domain->revmap_data.nomap.max_irq) {
if (virq >= domain->revmap_direct_max_irq) {
pr_err("ERROR: no free irqs available below %i maximum\n",
domain->revmap_data.nomap.max_irq);
domain->revmap_direct_max_irq);
irq_free_desc(virq);
return 0;
}
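
Typical usage of a direct-mapped (nomap) domain, mirroring the powermac psurge hunk at the top of this diff; the ops pointer is illustrative:

    host = irq_domain_add_nomap(NULL, ~0, &foo_host_ops, NULL);
    if (host)
            virq = irq_create_direct_mapping(host);   /* the virq doubles as the hwirq */
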
......@@ -569,9 +396,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
if (domain == NULL)
domain = irq_default_domain;
if (domain == NULL) {
pr_warning("irq_create_mapping called for"
" NULL domain, hwirq=%lx\n", hwirq);
WARN_ON(1);
WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
return 0;
}
pr_debug("-> using domain @%p\n", domain);
......@@ -583,10 +408,6 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
return virq;
}
/* Get a virtual interrupt number */
if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
return irq_domain_legacy_revmap(domain, hwirq);
/* Allocate a virtual interrupt number */
hint = hwirq % nr_irqs;
if (hint == 0)
......@@ -639,12 +460,7 @@ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
if (unlikely(ret < 0))
return ret;
ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
if (unlikely(ret < 0)) {
irq_free_descs(irq_base, count);
return ret;
}
irq_domain_associate_many(domain, irq_base, hwirq_base, count);
return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
......@@ -671,7 +487,7 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
if (intsize > 0)
return intspec[0];
#endif
pr_warning("no irq domain found for %s !\n",
pr_warn("no irq domain found for %s !\n",
of_node_full_name(controller));
return 0;
}
......@@ -714,11 +530,7 @@ void irq_dispose_mapping(unsigned int virq)
if (WARN_ON(domain == NULL))
return;
/* Never unmap legacy interrupts */
if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
return;
irq_domain_disassociate_many(domain, virq, 1);
irq_domain_disassociate(domain, virq);
irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
......@@ -739,63 +551,51 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
if (domain == NULL)
return 0;
switch (domain->revmap_type) {
case IRQ_DOMAIN_MAP_LEGACY:
return irq_domain_legacy_revmap(domain, hwirq);
case IRQ_DOMAIN_MAP_LINEAR:
return irq_linear_revmap(domain, hwirq);
case IRQ_DOMAIN_MAP_TREE:
rcu_read_lock();
data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
rcu_read_unlock();
if (data)
return data->irq;
break;
case IRQ_DOMAIN_MAP_NOMAP:
if (hwirq < domain->revmap_direct_max_irq) {
data = irq_get_irq_data(hwirq);
if (data && (data->domain == domain) && (data->hwirq == hwirq))
return hwirq;
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
/**
* irq_linear_revmap() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
*
* This is a fast path that can be called directly by irq controller code to
* save a handful of instructions.
*/
unsigned int irq_linear_revmap(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
/* Check revmap bounds; complain if exceeded */
if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
return 0;
/* Check if the hwirq is in the linear revmap. */
if (hwirq < domain->revmap_size)
return domain->linear_revmap[hwirq];
return domain->revmap_data.linear.revmap[hwirq];
rcu_read_lock();
data = radix_tree_lookup(&domain->revmap_tree, hwirq);
rcu_read_unlock();
return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_linear_revmap);
EXPORT_SYMBOL_GPL(irq_find_mapping);
#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
unsigned long flags;
struct irq_desc *desc;
const char *p;
static const char none[] = "none";
void *data;
struct irq_domain *domain;
struct radix_tree_iter iter;
void *data, **slot;
int i;
seq_printf(m, "%-5s %-7s %-15s %-*s %s\n", "irq", "hwirq",
seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
"name", "mapped", "linear-max", "direct-max", "devtree-node");
mutex_lock(&irq_domain_mutex);
list_for_each_entry(domain, &irq_domain_list, link) {
int count = 0;
radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
count++;
seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
domain == irq_default_domain ? '*' : ' ', domain->name,
domain->revmap_size + count, domain->revmap_size,
domain->revmap_direct_max_irq,
domain->of_node ? of_node_full_name(domain->of_node) : "");
}
mutex_unlock(&irq_domain_mutex);
seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
"chip name", (int)(2 * sizeof(void *) + 2), "chip data",
"domain name");
"active", "type", "domain");
for (i = 1; i < nr_irqs; i++) {
desc = irq_to_desc(i);
......@@ -803,28 +603,28 @@ static int virq_debug_show(struct seq_file *m, void *private)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
domain = desc->irq_data.domain;
if (desc->action && desc->action->handler) {
if (domain) {
struct irq_chip *chip;
int hwirq = desc->irq_data.hwirq;
bool direct;
seq_printf(m, "%5d ", i);
seq_printf(m, "0x%05lx ", desc->irq_data.hwirq);
seq_printf(m, "0x%05x ", hwirq);
chip = irq_desc_get_chip(desc);
if (chip && chip->name)
p = chip->name;
else
p = none;
seq_printf(m, "%-15s ", p);
seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");
data = irq_desc_get_chip_data(desc);
seq_printf(m, data ? "0x%p " : " %p ", data);
if (desc->irq_data.domain)
p = of_node_full_name(desc->irq_data.domain->of_node);
else
p = none;
seq_printf(m, "%s\n", p);
seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
seq_printf(m, "%6s%-8s ",
(hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
direct ? "(DIRECT)" : "");
seq_printf(m, "%s\n", desc->irq_data.domain->name);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
......@@ -921,18 +721,3 @@ const struct irq_domain_ops irq_domain_simple_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
#ifdef CONFIG_OF_IRQ
void irq_domain_generate_simple(const struct of_device_id *match,
u64 phys_base, unsigned int irq_start)
{
struct device_node *node;
pr_debug("looking for phys_base=%llx, irq_start=%i\n",
(unsigned long long) phys_base, (int) irq_start);
node = of_find_matching_node_by_address(NULL, match, phys_base);
if (node)
irq_domain_add_legacy(node, 32, irq_start, 0,
&irq_domain_simple_ops, NULL);
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
#endif
......@@ -462,6 +462,8 @@ int show_interrupts(struct seq_file *p, void *v)
} else {
seq_printf(p, " %8s", "None");
}
if (desc->irq_data.domain)
seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
......