Commit 331dd19e authored by Jiang Liu, committed by Thomas Gleixner

x86/uv: Use new irqdomain interfaces to allocate/free IRQ

Use the new irqdomain interfaces to allocate and free IRQs, so that
GENERIC_IRQ_LEGACY_ALLOC_HWIRQ can be removed later.
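[Illustrative sketch, not part of this patch: the general conversion
pattern applied here, assuming the x86 hierarchical irqdomain API
introduced by this series. my_setup_irq()/my_teardown_irq() are
hypothetical helper names; the allocation/free calls are the ones the
patch switches to.]

	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <asm/hw_irq.h>		/* struct irq_alloc_info, init_irq_alloc_info() */

	static int my_setup_irq(int cpu, int node)
	{
		struct irq_alloc_info info;
		int irq;

		/* Legacy allocator, to go away together with
		 * GENERIC_IRQ_LEGACY_ALLOC_HWIRQ:
		 *	irq = irq_alloc_hwirq(node);
		 */
		init_irq_alloc_info(&info, cpumask_of(cpu));
		/* NULL selects the default (x86 vector) irqdomain */
		irq = irq_domain_alloc_irqs(NULL, 1, node, &info);
		if (irq <= 0)
			return -EBUSY;

		return irq;
	}

	static void my_teardown_irq(unsigned int irq)
	{
		/* Was: irq_free_hwirq(irq); */
		irq_domain_free_irqs(irq, 1);
	}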
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Cohen <david.a.cohen@linux.intel.com>
Cc: Sander Eikelenboom <linux@eikelenboom.it>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Link: http://lkml.kernel.org/r/1428905519-23704-6-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 4c8f9960
@@ -12,6 +12,7 @@
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 
 #include <asm/apic.h>
 #include <asm/uv/uv_irq.h>
@@ -130,24 +131,14 @@ static int
 arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 		   unsigned long mmr_offset, int limit)
 {
-	const struct cpumask *eligible_cpu = cpumask_of(cpu);
 	struct irq_cfg *cfg = irq_cfg(irq);
 	unsigned long mmr_value;
 	struct uv_IO_APIC_route_entry *entry;
-	int mmr_pnode, err;
-	unsigned int dest;
+	int mmr_pnode;
 
 	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
 			sizeof(unsigned long));
 
-	err = assign_irq_vector(irq, cfg, eligible_cpu);
-	if (err != 0)
-		return err;
-
-	err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
-	if (err != 0)
-		return err;
-
 	if (limit == UV_AFFINITY_CPU)
 		irq_set_status_flags(irq, IRQ_NO_BALANCING);
 	else
@@ -164,7 +155,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	entry->polarity = 0;
 	entry->trigger = 0;
 	entry->mask = 0;
-	entry->dest = dest;
+	entry->dest = cfg->dest_apicid;
 
 	mmr_pnode = uv_blade_to_pnode(mmr_blade);
 	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -238,9 +229,13 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
 		 unsigned long mmr_offset, int limit)
 {
-	int ret, irq = irq_alloc_hwirq(uv_blade_to_memory_nid(mmr_blade));
+	int ret, irq;
+	struct irq_alloc_info info;
 
-	if (!irq)
+	init_irq_alloc_info(&info, cpumask_of(cpu));
+	irq = irq_domain_alloc_irqs(NULL, 1, uv_blade_to_memory_nid(mmr_blade),
+				    &info);
+	if (irq <= 0)
 		return -EBUSY;
 
 	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
@@ -248,7 +243,7 @@ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
 	if (ret == irq)
 		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
 	else
-		irq_free_hwirq(irq);
+		irq_domain_free_irqs(irq, 1);
 
 	return ret;
 }
@@ -283,6 +278,6 @@ void uv_teardown_irq(unsigned int irq)
 			n = n->rb_right;
 	}
 	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
-	irq_free_hwirq(irq);
+	irq_domain_free_irqs(irq, 1);
 }
 EXPORT_SYMBOL_GPL(uv_teardown_irq);