Commit 4a60cfa9 authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (96 commits)
  apic, x86: Use BIOS settings for IBS and MCE threshold interrupt LVT offsets
  apic, x86: Check if EILVT APIC registers are available (AMD only)
  x86: ioapic: Call free_irte only if interrupt remapping enabled
  arm: Use ARCH_IRQ_INIT_FLAGS
  genirq, ARM: Fix boot on ARM platforms
  genirq: Fix CONFIG_GENIRQ_NO_DEPRECATED=y build
  x86: Switch sparse_irq allocations to GFP_KERNEL
  genirq: Switch sparse_irq allocator to GFP_KERNEL
  genirq: Make sparse_lock a mutex
  x86: lguest: Use new irq allocator
  genirq: Remove the now unused sparse irq leftovers
  genirq: Sanitize dynamic irq handling
  genirq: Remove arch_init_chip_data()
  x86: xen: Sanitise sparse_irq handling
  x86: Use sane enumeration
  x86: uv: Clean up the direct access to irq_desc
  x86: Make io_apic.c local functions static
  genirq: Remove irq_2_iommu
  x86: Speed up the irq_remapped check in hot pathes
  intr_remap: Simplify the code further
  ...

Fix up trivial conflicts in arch/x86/Kconfig
parents 62bea97f 27afdf20
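
The bulk of the series below converts irq_chip implementations from the old callbacks keyed on a raw irq number to the new struct irq_data based irq_* callbacks. As a rough, illustrative sketch of the pattern the per-architecture hunks follow (the "demo" chip and the my_hw_*() register helpers are placeholders invented for this example, not taken from the merge):

    /*
     * Illustrative only -- not part of this merge. "demo" and the
     * my_hw_*() helpers stand in for a real controller's register accessors.
     */
    #include <linux/irq.h>

    static void my_hw_mask(unsigned int hwirq)   { /* write mask register */ }
    static void my_hw_unmask(unsigned int hwirq) { /* write unmask register */ }

    /* Old style: callbacks take the raw irq number. */
    static void demo_mask(unsigned int irq)   { my_hw_mask(irq); }
    static void demo_unmask(unsigned int irq) { my_hw_unmask(irq); }

    static struct irq_chip demo_chip_old = {
    	.name   = "DEMO",
    	.mask   = demo_mask,
    	.unmask = demo_unmask,
    };

    /* New style: irq_* callbacks take struct irq_data; the number is data->irq. */
    static void demo_irq_mask(struct irq_data *data)   { my_hw_mask(data->irq); }
    static void demo_irq_unmask(struct irq_data *data) { my_hw_unmask(data->irq); }

    static struct irq_chip demo_chip_new = {
    	.name       = "DEMO",
    	.irq_mask   = demo_irq_mask,
    	.irq_unmask = demo_irq_unmask,
    };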
@@ -28,7 +28,7 @@
   </authorgroup>

   <copyright>
-  <year>2005-2006</year>
+  <year>2005-2010</year>
   <holder>Thomas Gleixner</holder>
   </copyright>
   <copyright>
@@ -100,6 +100,10 @@
 	  <listitem><para>Edge type</para></listitem>
 	  <listitem><para>Simple type</para></listitem>
 	  </itemizedlist>
+	  During the implementation we identified another type:
+	  <itemizedlist>
+	  <listitem><para>Fast EOI type</para></listitem>
+	  </itemizedlist>
 	  In the SMP world of the __do_IRQ() super-handler another type
 	  was identified:
 	  <itemizedlist>
@@ -153,6 +157,7 @@
 	is still available. This leads to a kind of duality for the time
 	being. Over time the new model should be used in more and more
 	architectures, as it enables smaller and cleaner IRQ subsystems.
+	It's deprecated for three years now and about to be removed.
 	</para>
   </chapter>
   <chapter id="bugs">
@@ -217,6 +222,7 @@
 	<itemizedlist>
 	<listitem><para>handle_level_irq</para></listitem>
 	<listitem><para>handle_edge_irq</para></listitem>
+	<listitem><para>handle_fasteoi_irq</para></listitem>
 	<listitem><para>handle_simple_irq</para></listitem>
 	<listitem><para>handle_percpu_irq</para></listitem>
 	</itemizedlist>
@@ -233,33 +239,33 @@
 	are used by the default flow implementations.
 	The following helper functions are implemented (simplified excerpt):
 	<programlisting>
-default_enable(irq)
+default_enable(struct irq_data *data)
 {
-	desc->chip->unmask(irq);
+	desc->chip->irq_unmask(data);
 }

-default_disable(irq)
+default_disable(struct irq_data *data)
 {
-	if (!delay_disable(irq))
-		desc->chip->mask(irq);
+	if (!delay_disable(data))
+		desc->chip->irq_mask(data);
 }

-default_ack(irq)
+default_ack(struct irq_data *data)
 {
-	chip->ack(irq);
+	chip->irq_ack(data);
 }

-default_mask_ack(irq)
+default_mask_ack(struct irq_data *data)
 {
-	if (chip->mask_ack) {
-		chip->mask_ack(irq);
+	if (chip->irq_mask_ack) {
+		chip->irq_mask_ack(data);
 	} else {
-		chip->mask(irq);
-		chip->ack(irq);
+		chip->irq_mask(data);
+		chip->irq_ack(data);
 	}
 }

-noop(irq)
+noop(struct irq_data *data))
 {
 }
@@ -278,12 +284,27 @@ noop(irq)
 	<para>
 	The following control flow is implemented (simplified excerpt):
 	<programlisting>
-desc->chip->start();
+desc->chip->irq_mask();
 handle_IRQ_event(desc->action);
-desc->chip->end();
+desc->chip->irq_unmask();
 	</programlisting>
 	</para>
     </sect3>
+    <sect3 id="Default_FASTEOI_IRQ_flow_handler">
+	<title>Default Fast EOI IRQ flow handler</title>
+	<para>
+	handle_fasteoi_irq provides a generic implementation
+	for interrupts, which only need an EOI at the end of
+	the handler
+	</para>
+	<para>
+	The following control flow is implemented (simplified excerpt):
+	<programlisting>
+handle_IRQ_event(desc->action);
+desc->chip->irq_eoi();
+	</programlisting>
+	</para>
+    </sect3>
     <sect3 id="Default_Edge_IRQ_flow_handler">
 	<title>Default Edge IRQ flow handler</title>
 	<para>
@@ -294,20 +315,19 @@ desc->chip->end();
 	The following control flow is implemented (simplified excerpt):
 	<programlisting>
 if (desc->status &amp; running) {
-	desc->chip->hold();
+	desc->chip->irq_mask();
 	desc->status |= pending | masked;
 	return;
 }
-desc->chip->start();
+desc->chip->irq_ack();
 desc->status |= running;
 do {
 	if (desc->status &amp; masked)
-		desc->chip->enable();
+		desc->chip->irq_unmask();
 	desc->status &amp;= ~pending;
 	handle_IRQ_event(desc->action);
 } while (status &amp; pending);
 desc->status &amp;= ~running;
-desc->chip->end();
 	</programlisting>
 	</para>
     </sect3>
@@ -342,9 +362,9 @@ handle_IRQ_event(desc->action);
 	<para>
 	The following control flow is implemented (simplified excerpt):
 	<programlisting>
-desc->chip->start();
 handle_IRQ_event(desc->action);
-desc->chip->end();
+if (desc->chip->irq_eoi)
+	desc->chip->irq_eoi();
 	</programlisting>
 	</para>
     </sect3>
@@ -375,8 +395,7 @@ desc->chip->end();
 	mechanism. (It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when
 	you want to use the delayed interrupt disable feature and your
 	hardware is not capable of retriggering an interrupt.)
-	The delayed interrupt disable can be runtime enabled, per interrupt,
-	by setting the IRQ_DELAYED_DISABLE flag in the irq_desc status field.
+	The delayed interrupt disable is not configurable.
 	</para>
     </sect2>
   </sect1>
@@ -387,13 +406,13 @@ desc->chip->end();
 	contains all the direct chip relevant functions, which
 	can be utilized by the irq flow implementations.
 	<itemizedlist>
-	<listitem><para>ack()</para></listitem>
-	<listitem><para>mask_ack() - Optional, recommended for performance</para></listitem>
-	<listitem><para>mask()</para></listitem>
-	<listitem><para>unmask()</para></listitem>
-	<listitem><para>retrigger() - Optional</para></listitem>
-	<listitem><para>set_type() - Optional</para></listitem>
-	<listitem><para>set_wake() - Optional</para></listitem>
+	<listitem><para>irq_ack()</para></listitem>
+	<listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem>
+	<listitem><para>irq_mask()</para></listitem>
+	<listitem><para>irq_unmask()</para></listitem>
+	<listitem><para>irq_retrigger() - Optional</para></listitem>
+	<listitem><para>irq_set_type() - Optional</para></listitem>
+	<listitem><para>irq_set_wake() - Optional</para></listitem>
 	</itemizedlist>
 	These primitives are strictly intended to mean what they say: ack means
 	ACK, masking means masking of an IRQ line, etc. It is up to the flow
@@ -458,6 +477,7 @@ desc->chip->end();
 	<para>
 	This chapter contains the autogenerated documentation of the internal functions.
 	</para>
+!Ikernel/irq/irqdesc.c
 !Ikernel/irq/handle.c
 !Ikernel/irq/chip.c
   </chapter>
...
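To make the documented chip/flow-handler split concrete, here is a small sketch (not part of the patch) of a chip that only needs an EOI, wired to the handle_fasteoi_irq flow described above; "demo" and demo_hw_eoi() are invented stand-ins for a real controller's EOI register write:

    #include <linux/irq.h>

    static void demo_hw_eoi(unsigned int hwirq) { /* write EOI register */ }

    static void demo_irq_eoi(struct irq_data *data)
    {
    	demo_hw_eoi(data->irq);
    }

    static struct irq_chip demo_eoi_chip = {
    	.name    = "DEMO-EOI",
    	.irq_eoi = demo_irq_eoi,
    };

    static void demo_setup_irq(unsigned int irq)
    {
    	/* Per the simplified flow above, handle_fasteoi_irq only needs irq_eoi(). */
    	set_irq_chip_and_handler_name(irq, &demo_eoi_chip,
    				      handle_fasteoi_irq, "fasteoi");
    }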
@@ -3241,6 +3241,12 @@ F: drivers/net/irda/
 F:	include/net/irda/
 F:	net/irda/
 
+IRQ SUBSYSTEM
+M:	Thomas Gleixner <tglx@linutronix.de>
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
+F:	kernel/irq/
+
 ISAPNP
 M:	Jaroslav Kysela <perex@perex.cz>
 S:	Maintained
...
@@ -24,4 +24,6 @@ void set_irq_flags(unsigned int irq, unsigned int flags);
 #define IRQF_PROBE	(1 << 1)
 #define IRQF_NOAUTOEN	(1 << 2)
 
+#define ARCH_IRQ_INIT_FLAGS	(IRQ_NOREQUEST | IRQ_NOPROBE)
+
 #endif
@@ -154,14 +154,6 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 
 void __init init_IRQ(void)
 {
-	struct irq_desc *desc;
-	int irq;
-
-	for (irq = 0; irq < nr_irqs; irq++) {
-		desc = irq_to_desc_alloc_node(irq, 0);
-		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
-	}
-
 	init_arch_irq();
 }
 
@@ -169,7 +161,7 @@ void __init init_IRQ(void)
 int __init arch_probe_nr_irqs(void)
 {
 	nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
-	return 0;
+	return nr_irqs;
 }
 #endif
...
...@@ -67,21 +67,21 @@ static void bcmring_unmask_irq2(unsigned int irq) ...@@ -67,21 +67,21 @@ static void bcmring_unmask_irq2(unsigned int irq)
} }
static struct irq_chip bcmring_irq0_chip = { static struct irq_chip bcmring_irq0_chip = {
.typename = "ARM-INTC0", .name = "ARM-INTC0",
.ack = bcmring_mask_irq0, .ack = bcmring_mask_irq0,
.mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. */ .mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. */
.unmask = bcmring_unmask_irq0, /* unmaks an interrupt */ .unmask = bcmring_unmask_irq0, /* unmaks an interrupt */
}; };
static struct irq_chip bcmring_irq1_chip = { static struct irq_chip bcmring_irq1_chip = {
.typename = "ARM-INTC1", .name = "ARM-INTC1",
.ack = bcmring_mask_irq1, .ack = bcmring_mask_irq1,
.mask = bcmring_mask_irq1, .mask = bcmring_mask_irq1,
.unmask = bcmring_unmask_irq1, .unmask = bcmring_unmask_irq1,
}; };
static struct irq_chip bcmring_irq2_chip = { static struct irq_chip bcmring_irq2_chip = {
.typename = "ARM-SINTC", .name = "ARM-SINTC",
.ack = bcmring_mask_irq2, .ack = bcmring_mask_irq2,
.mask = bcmring_mask_irq2, .mask = bcmring_mask_irq2,
.unmask = bcmring_unmask_irq2, .unmask = bcmring_unmask_irq2,
......
...@@ -164,10 +164,10 @@ static void iop13xx_msi_nop(unsigned int irq) ...@@ -164,10 +164,10 @@ static void iop13xx_msi_nop(unsigned int irq)
static struct irq_chip iop13xx_msi_chip = { static struct irq_chip iop13xx_msi_chip = {
.name = "PCI-MSI", .name = "PCI-MSI",
.ack = iop13xx_msi_nop, .ack = iop13xx_msi_nop,
.enable = unmask_msi_irq, .irq_enable = unmask_msi_irq,
.disable = mask_msi_irq, .irq_disable = mask_msi_irq,
.mask = mask_msi_irq, .irq_mask = mask_msi_irq,
.unmask = unmask_msi_irq, .irq_unmask = unmask_msi_irq,
}; };
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
......
...@@ -104,8 +104,8 @@ static int ia64_msi_retrigger_irq(unsigned int irq) ...@@ -104,8 +104,8 @@ static int ia64_msi_retrigger_irq(unsigned int irq)
*/ */
static struct irq_chip ia64_msi_chip = { static struct irq_chip ia64_msi_chip = {
.name = "PCI-MSI", .name = "PCI-MSI",
.mask = mask_msi_irq, .irq_mask = mask_msi_irq,
.unmask = unmask_msi_irq, .irq_unmask = unmask_msi_irq,
.ack = ia64_ack_msi_irq, .ack = ia64_ack_msi_irq,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.set_affinity = ia64_set_msi_irq_affinity, .set_affinity = ia64_set_msi_irq_affinity,
...@@ -160,8 +160,8 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) ...@@ -160,8 +160,8 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
static struct irq_chip dmar_msi_type = { static struct irq_chip dmar_msi_type = {
.name = "DMAR_MSI", .name = "DMAR_MSI",
.unmask = dmar_msi_unmask, .irq_unmask = dmar_msi_unmask,
.mask = dmar_msi_mask, .irq_mask = dmar_msi_mask,
.ack = ia64_ack_msi_irq, .ack = ia64_ack_msi_irq,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.set_affinity = dmar_msi_set_affinity, .set_affinity = dmar_msi_set_affinity,
......
...@@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(unsigned int irq) ...@@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(unsigned int irq)
static struct irq_chip sn_msi_chip = { static struct irq_chip sn_msi_chip = {
.name = "PCI-MSI", .name = "PCI-MSI",
.mask = mask_msi_irq, .irq_mask = mask_msi_irq,
.unmask = unmask_msi_irq, .irq_unmask = unmask_msi_irq,
.ack = sn_ack_msi_irq, .ack = sn_ack_msi_irq,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.set_affinity = sn_set_msi_irq_affinity, .set_affinity = sn_set_msi_irq_affinity,
......
...@@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v) ...@@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j) for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif #endif
seq_printf(p, " %14s", irq_desc[i].chip->typename); seq_printf(p, " %14s", irq_desc[i].chip->name);
seq_printf(p, " %s", action->name); seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next) for (action=action->next; action; action = action->next)
......
...@@ -65,7 +65,7 @@ static void shutdown_m32104ut_irq(unsigned int irq) ...@@ -65,7 +65,7 @@ static void shutdown_m32104ut_irq(unsigned int irq)
static struct irq_chip m32104ut_irq_type = static struct irq_chip m32104ut_irq_type =
{ {
.typename = "M32104UT-IRQ", .name = "M32104UT-IRQ",
.startup = startup_m32104ut_irq, .startup = startup_m32104ut_irq,
.shutdown = shutdown_m32104ut_irq, .shutdown = shutdown_m32104ut_irq,
.enable = enable_m32104ut_irq, .enable = enable_m32104ut_irq,
......
...@@ -71,7 +71,7 @@ static void shutdown_m32700ut_irq(unsigned int irq) ...@@ -71,7 +71,7 @@ static void shutdown_m32700ut_irq(unsigned int irq)
static struct irq_chip m32700ut_irq_type = static struct irq_chip m32700ut_irq_type =
{ {
.typename = "M32700UT-IRQ", .name = "M32700UT-IRQ",
.startup = startup_m32700ut_irq, .startup = startup_m32700ut_irq,
.shutdown = shutdown_m32700ut_irq, .shutdown = shutdown_m32700ut_irq,
.enable = enable_m32700ut_irq, .enable = enable_m32700ut_irq,
...@@ -148,7 +148,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq) ...@@ -148,7 +148,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
static struct irq_chip m32700ut_pld_irq_type = static struct irq_chip m32700ut_pld_irq_type =
{ {
.typename = "M32700UT-PLD-IRQ", .name = "M32700UT-PLD-IRQ",
.startup = startup_m32700ut_pld_irq, .startup = startup_m32700ut_pld_irq,
.shutdown = shutdown_m32700ut_pld_irq, .shutdown = shutdown_m32700ut_pld_irq,
.enable = enable_m32700ut_pld_irq, .enable = enable_m32700ut_pld_irq,
...@@ -217,7 +217,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq) ...@@ -217,7 +217,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq)
static struct irq_chip m32700ut_lanpld_irq_type = static struct irq_chip m32700ut_lanpld_irq_type =
{ {
.typename = "M32700UT-PLD-LAN-IRQ", .name = "M32700UT-PLD-LAN-IRQ",
.startup = startup_m32700ut_lanpld_irq, .startup = startup_m32700ut_lanpld_irq,
.shutdown = shutdown_m32700ut_lanpld_irq, .shutdown = shutdown_m32700ut_lanpld_irq,
.enable = enable_m32700ut_lanpld_irq, .enable = enable_m32700ut_lanpld_irq,
...@@ -286,7 +286,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq) ...@@ -286,7 +286,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)
static struct irq_chip m32700ut_lcdpld_irq_type = static struct irq_chip m32700ut_lcdpld_irq_type =
{ {
.typename = "M32700UT-PLD-LCD-IRQ", .name = "M32700UT-PLD-LCD-IRQ",
.startup = startup_m32700ut_lcdpld_irq, .startup = startup_m32700ut_lcdpld_irq,
.shutdown = shutdown_m32700ut_lcdpld_irq, .shutdown = shutdown_m32700ut_lcdpld_irq,
.enable = enable_m32700ut_lcdpld_irq, .enable = enable_m32700ut_lcdpld_irq,
......
...@@ -65,7 +65,7 @@ static void shutdown_mappi_irq(unsigned int irq) ...@@ -65,7 +65,7 @@ static void shutdown_mappi_irq(unsigned int irq)
static struct irq_chip mappi_irq_type = static struct irq_chip mappi_irq_type =
{ {
.typename = "MAPPI-IRQ", .name = "MAPPI-IRQ",
.startup = startup_mappi_irq, .startup = startup_mappi_irq,
.shutdown = shutdown_mappi_irq, .shutdown = shutdown_mappi_irq,
.enable = enable_mappi_irq, .enable = enable_mappi_irq,
......
...@@ -72,7 +72,7 @@ static void shutdown_mappi2_irq(unsigned int irq) ...@@ -72,7 +72,7 @@ static void shutdown_mappi2_irq(unsigned int irq)
static struct irq_chip mappi2_irq_type = static struct irq_chip mappi2_irq_type =
{ {
.typename = "MAPPI2-IRQ", .name = "MAPPI2-IRQ",
.startup = startup_mappi2_irq, .startup = startup_mappi2_irq,
.shutdown = shutdown_mappi2_irq, .shutdown = shutdown_mappi2_irq,
.enable = enable_mappi2_irq, .enable = enable_mappi2_irq,
......
...@@ -72,7 +72,7 @@ static void shutdown_mappi3_irq(unsigned int irq) ...@@ -72,7 +72,7 @@ static void shutdown_mappi3_irq(unsigned int irq)
static struct irq_chip mappi3_irq_type = static struct irq_chip mappi3_irq_type =
{ {
.typename = "MAPPI3-IRQ", .name = "MAPPI3-IRQ",
.startup = startup_mappi3_irq, .startup = startup_mappi3_irq,
.shutdown = shutdown_mappi3_irq, .shutdown = shutdown_mappi3_irq,
.enable = enable_mappi3_irq, .enable = enable_mappi3_irq,
......
...@@ -63,7 +63,7 @@ static void shutdown_oaks32r_irq(unsigned int irq) ...@@ -63,7 +63,7 @@ static void shutdown_oaks32r_irq(unsigned int irq)
static struct irq_chip oaks32r_irq_type = static struct irq_chip oaks32r_irq_type =
{ {
.typename = "OAKS32R-IRQ", .name = "OAKS32R-IRQ",
.startup = startup_oaks32r_irq, .startup = startup_oaks32r_irq,
.shutdown = shutdown_oaks32r_irq, .shutdown = shutdown_oaks32r_irq,
.enable = enable_oaks32r_irq, .enable = enable_oaks32r_irq,
......
...@@ -72,7 +72,7 @@ static void shutdown_opsput_irq(unsigned int irq) ...@@ -72,7 +72,7 @@ static void shutdown_opsput_irq(unsigned int irq)
static struct irq_chip opsput_irq_type = static struct irq_chip opsput_irq_type =
{ {
.typename = "OPSPUT-IRQ", .name = "OPSPUT-IRQ",
.startup = startup_opsput_irq, .startup = startup_opsput_irq,
.shutdown = shutdown_opsput_irq, .shutdown = shutdown_opsput_irq,
.enable = enable_opsput_irq, .enable = enable_opsput_irq,
...@@ -149,7 +149,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq) ...@@ -149,7 +149,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq)
static struct irq_chip opsput_pld_irq_type = static struct irq_chip opsput_pld_irq_type =
{ {
.typename = "OPSPUT-PLD-IRQ", .name = "OPSPUT-PLD-IRQ",
.startup = startup_opsput_pld_irq, .startup = startup_opsput_pld_irq,
.shutdown = shutdown_opsput_pld_irq, .shutdown = shutdown_opsput_pld_irq,
.enable = enable_opsput_pld_irq, .enable = enable_opsput_pld_irq,
...@@ -218,7 +218,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq) ...@@ -218,7 +218,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq)
static struct irq_chip opsput_lanpld_irq_type = static struct irq_chip opsput_lanpld_irq_type =
{ {
.typename = "OPSPUT-PLD-LAN-IRQ", .name = "OPSPUT-PLD-LAN-IRQ",
.startup = startup_opsput_lanpld_irq, .startup = startup_opsput_lanpld_irq,
.shutdown = shutdown_opsput_lanpld_irq, .shutdown = shutdown_opsput_lanpld_irq,
.enable = enable_opsput_lanpld_irq, .enable = enable_opsput_lanpld_irq,
......
...@@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq) ...@@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq)
static struct irq_chip mappi_irq_type = static struct irq_chip mappi_irq_type =
{ {
.typename = "M32700-IRQ", .name = "M32700-IRQ",
.startup = startup_mappi_irq, .startup = startup_mappi_irq,
.shutdown = shutdown_mappi_irq, .shutdown = shutdown_mappi_irq,
.enable = enable_mappi_irq, .enable = enable_mappi_irq,
...@@ -136,7 +136,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq) ...@@ -136,7 +136,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
static struct irq_chip m32700ut_pld_irq_type = static struct irq_chip m32700ut_pld_irq_type =
{ {
.typename = "USRV-PLD-IRQ", .name = "USRV-PLD-IRQ",
.startup = startup_m32700ut_pld_irq, .startup = startup_m32700ut_pld_irq,
.shutdown = shutdown_m32700ut_pld_irq, .shutdown = shutdown_m32700ut_pld_irq,
.enable = enable_m32700ut_pld_irq, .enable = enable_m32700ut_pld_irq,
......
...@@ -310,9 +310,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) ...@@ -310,9 +310,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
} }
static struct irq_chip msic_irq_chip = { static struct irq_chip msic_irq_chip = {
.mask = mask_msi_irq, .irq_mask = mask_msi_irq,
.unmask = unmask_msi_irq, .irq_unmask = unmask_msi_irq,
.shutdown = unmask_msi_irq, .irq_shutdown = mask_msi_irq,
.name = "AXON-MSI", .name = "AXON-MSI",
}; };
......
...@@ -243,7 +243,7 @@ static unsigned int xics_startup(unsigned int virq) ...@@ -243,7 +243,7 @@ static unsigned int xics_startup(unsigned int virq)
* at that level, so we do it here by hand. * at that level, so we do it here by hand.
*/ */
if (irq_to_desc(virq)->msi_desc) if (irq_to_desc(virq)->msi_desc)
unmask_msi_irq(virq); unmask_msi_irq(irq_get_irq_data(virq));
/* unmask it */ /* unmask it */
xics_unmask_irq(virq); xics_unmask_irq(virq);
......
...@@ -51,8 +51,8 @@ static void fsl_msi_end_irq(unsigned int virq) ...@@ -51,8 +51,8 @@ static void fsl_msi_end_irq(unsigned int virq)
} }
static struct irq_chip fsl_msi_chip = { static struct irq_chip fsl_msi_chip = {
.mask = mask_msi_irq, .irq_mask = mask_msi_irq,
.unmask = unmask_msi_irq, .irq_unmask = unmask_msi_irq,
.ack = fsl_msi_end_irq, .ack = fsl_msi_end_irq,
.name = "FSL-MSI", .name = "FSL-MSI",
}; };
......
...@@ -39,24 +39,24 @@ ...@@ -39,24 +39,24 @@
static struct mpic *msi_mpic; static struct mpic *msi_mpic;
static void mpic_pasemi_msi_mask_irq(unsigned int irq) static void mpic_pasemi_msi_mask_irq(struct irq_data *data)
{ {
pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq); pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq);
mask_msi_irq(irq); mask_msi_irq(data);
mpic_mask_irq(irq); mpic_mask_irq(data->irq);
} }
static void mpic_pasemi_msi_unmask_irq(unsigned int irq) static void mpic_pasemi_msi_unmask_irq(struct irq_data *data)
{ {
pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq); pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq);
mpic_unmask_irq(irq); mpic_unmask_irq(data->irq);
unmask_msi_irq(irq); unmask_msi_irq(data);
} }
static struct irq_chip mpic_pasemi_msi_chip = { static struct irq_chip mpic_pasemi_msi_chip = {
.shutdown = mpic_pasemi_msi_mask_irq, .irq_shutdown = mpic_pasemi_msi_mask_irq,
.mask = mpic_pasemi_msi_mask_irq, .irq_mask = mpic_pasemi_msi_mask_irq,
.unmask = mpic_pasemi_msi_unmask_irq, .irq_unmask = mpic_pasemi_msi_unmask_irq,
.eoi = mpic_end_irq, .eoi = mpic_end_irq,
.set_type = mpic_set_irq_type, .set_type = mpic_set_irq_type,
.set_affinity = mpic_set_affinity, .set_affinity = mpic_set_affinity,
......
...@@ -23,22 +23,22 @@ ...@@ -23,22 +23,22 @@
/* A bit ugly, can we get this from the pci_dev somehow? */ /* A bit ugly, can we get this from the pci_dev somehow? */
static struct mpic *msi_mpic; static struct mpic *msi_mpic;
static void mpic_u3msi_mask_irq(unsigned int irq) static void mpic_u3msi_mask_irq(struct irq_data *data)
{ {
mask_msi_irq(irq); mask_msi_irq(data);
mpic_mask_irq(irq); mpic_mask_irq(data->irq);
} }
static void mpic_u3msi_unmask_irq(unsigned int irq) static void mpic_u3msi_unmask_irq(struct irq_data *data)
{ {
mpic_unmask_irq(irq); mpic_unmask_irq(data->irq);
unmask_msi_irq(irq); unmask_msi_irq(data);
} }
static struct irq_chip mpic_u3msi_chip = { static struct irq_chip mpic_u3msi_chip = {
.shutdown = mpic_u3msi_mask_irq, .irq_shutdown = mpic_u3msi_mask_irq,
.mask = mpic_u3msi_mask_irq, .irq_mask = mpic_u3msi_mask_irq,
.unmask = mpic_u3msi_unmask_irq, .irq_unmask = mpic_u3msi_unmask_irq,
.eoi = mpic_end_irq, .eoi = mpic_end_irq,
.set_type = mpic_set_irq_type, .set_type = mpic_set_irq_type,
.set_affinity = mpic_set_affinity, .set_affinity = mpic_set_affinity,
......
@@ -290,7 +290,7 @@ void __init init_IRQ(void)
 int __init arch_probe_nr_irqs(void)
 {
 	nr_irqs = sh_mv.mv_nr_irqs;
-	return 0;
+	return NR_IRQS_LEGACY;
 }
 #endif
...
...@@ -114,10 +114,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num) ...@@ -114,10 +114,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num)
static struct irq_chip msi_irq = { static struct irq_chip msi_irq = {
.name = "PCI-MSI", .name = "PCI-MSI",
.mask = mask_msi_irq, .irq_mask = mask_msi_irq,
.unmask = unmask_msi_irq, .irq_unmask = unmask_msi_irq,
.enable = unmask_msi_irq, .irq_enable = unmask_msi_irq,
.disable = mask_msi_irq, .irq_disable = mask_msi_irq,
/* XXX affinity XXX */ /* XXX affinity XXX */
}; };
......
...@@ -208,7 +208,7 @@ static void tile_irq_chip_eoi(unsigned int irq) ...@@ -208,7 +208,7 @@ static void tile_irq_chip_eoi(unsigned int irq)
} }
static struct irq_chip tile_irq_chip = { static struct irq_chip tile_irq_chip = {
.typename = "tile_irq_chip", .name = "tile_irq_chip",
.ack = tile_irq_chip_ack, .ack = tile_irq_chip_ack,
.eoi = tile_irq_chip_eoi, .eoi = tile_irq_chip_eoi,
.mask = tile_irq_chip_mask, .mask = tile_irq_chip_mask,
...@@ -288,7 +288,7 @@ int show_interrupts(struct seq_file *p, void *v) ...@@ -288,7 +288,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j) for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif #endif
seq_printf(p, " %14s", irq_desc[i].chip->typename); seq_printf(p, " %14s", irq_desc[i].chip->name);
seq_printf(p, " %s", action->name); seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next) for (action = action->next; action; action = action->next)
......
...@@ -46,7 +46,7 @@ int show_interrupts(struct seq_file *p, void *v) ...@@ -46,7 +46,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j) for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif #endif
seq_printf(p, " %14s", irq_desc[i].chip->typename); seq_printf(p, " %14s", irq_desc[i].chip->name);
seq_printf(p, " %s", action->name); seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next) for (action=action->next; action; action = action->next)
...@@ -369,7 +369,7 @@ static void dummy(unsigned int irq) ...@@ -369,7 +369,7 @@ static void dummy(unsigned int irq)
/* This is used for everything else than the timer. */ /* This is used for everything else than the timer. */
static struct irq_chip normal_irq_type = { static struct irq_chip normal_irq_type = {
.typename = "SIGIO", .name = "SIGIO",
.release = free_irq_by_irq_and_dev, .release = free_irq_by_irq_and_dev,
.disable = dummy, .disable = dummy,
.enable = dummy, .enable = dummy,
...@@ -378,7 +378,7 @@ static struct irq_chip normal_irq_type = { ...@@ -378,7 +378,7 @@ static struct irq_chip normal_irq_type = {
}; };
static struct irq_chip SIGVTALRM_irq_type = { static struct irq_chip SIGVTALRM_irq_type = {
.typename = "SIGVTALRM", .name = "SIGVTALRM",
.release = free_irq_by_irq_and_dev, .release = free_irq_by_irq_and_dev,
.shutdown = dummy, /* never called */ .shutdown = dummy, /* never called */
.disable = dummy, .disable = dummy,
......
@@ -63,6 +63,10 @@ config X86
 	select HAVE_USER_RETURN_NOTIFIER
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_TEXT_POKE_SMP
+	select HAVE_GENERIC_HARDIRQS
+	select HAVE_SPARSE_IRQ
+	select GENERIC_IRQ_PROBE
+	select GENERIC_PENDING_IRQ if SMP
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS)
@@ -204,20 +208,6 @@ config HAVE_INTEL_TXT
 	def_bool y
 	depends on EXPERIMENTAL && DMAR && ACPI
 
-# Use the generic interrupt handling code in kernel/irq/:
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	def_bool y
-
-config GENERIC_PENDING_IRQ
-	def_bool y
-	depends on GENERIC_HARDIRQS && SMP
-
 config USE_GENERIC_SMP_HELPERS
 	def_bool y
 	depends on SMP
@@ -300,23 +290,6 @@ config X86_X2APIC
 
 	  If you don't know what to do here, say N.
 
-config SPARSE_IRQ
-	bool "Support sparse irq numbering"
-	depends on PCI_MSI || HT_IRQ
-	---help---
-	  This enables support for sparse irqs. This is useful for distro
-	  kernels that want to define a high CONFIG_NR_CPUS value but still
-	  want to have low kernel memory footprint on smaller machines.
-
-	  ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
-	    out the irq_desc[] array in a more NUMA-friendly way. )
-
-	  If you don't know what to do here, say N.
-
-config NUMA_IRQ_DESC
-	def_bool y
-	depends on SPARSE_IRQ && NUMA
-
 config X86_MPPARSE
 	bool "Enable MPS table" if ACPI
 	default y
...
@@ -252,9 +252,7 @@ static inline int apic_is_clustered_box(void)
 }
 #endif
 
-extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
-extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);
+extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
 
 #else /* !CONFIG_X86_LOCAL_APIC */
 static inline void lapic_shutdown(void) { }
...
@@ -131,6 +131,7 @@
 #define APIC_EILVTn(n)		(0x500 + 0x10 * n)
 #define APIC_EILVT_NR_AMD_K8	1	/* # of extended interrupts */
 #define APIC_EILVT_NR_AMD_10H	4
+#define APIC_EILVT_NR_MAX	APIC_EILVT_NR_AMD_10H
 #define APIC_EILVT_LVTOFF(x)	(((x) >> 4) & 0xF)
 #define APIC_EILVT_MSG_FIX	0x0
 #define APIC_EILVT_MSG_SMI	0x2
...
@@ -74,10 +74,12 @@ extern void hpet_disable(void);
 extern unsigned int hpet_readl(unsigned int a);
 extern void force_hpet_resume(void);
 
-extern void hpet_msi_unmask(unsigned int irq);
-extern void hpet_msi_mask(unsigned int irq);
-extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg);
-extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg);
+struct irq_data;
+extern void hpet_msi_unmask(struct irq_data *data);
+extern void hpet_msi_mask(struct irq_data *data);
+struct hpet_dev;
+extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
+extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
 
 #ifdef CONFIG_PCI_MSI
 extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
...
...@@ -78,6 +78,13 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, ...@@ -78,6 +78,13 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
irq_attr->polarity = polarity; irq_attr->polarity = polarity;
} }
struct irq_2_iommu {
struct intel_iommu *iommu;
u16 irte_index;
u16 sub_handle;
u8 irte_mask;
};
/* /*
* This is performance-critical, we want to do it O(1) * This is performance-critical, we want to do it O(1)
* *
...@@ -89,15 +96,17 @@ struct irq_cfg { ...@@ -89,15 +96,17 @@ struct irq_cfg {
cpumask_var_t old_domain; cpumask_var_t old_domain;
u8 vector; u8 vector;
u8 move_in_progress : 1; u8 move_in_progress : 1;
#ifdef CONFIG_INTR_REMAP
struct irq_2_iommu irq_2_iommu;
#endif
}; };
extern struct irq_cfg *irq_cfg(unsigned int);
extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
extern void send_cleanup_vector(struct irq_cfg *); extern void send_cleanup_vector(struct irq_cfg *);
struct irq_desc; struct irq_data;
extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *, int __ioapic_set_affinity(struct irq_data *, const struct cpumask *,
unsigned int *dest_id); unsigned int *dest_id);
extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
extern void setup_ioapic_dest(void); extern void setup_ioapic_dest(void);
......
@@ -55,6 +55,8 @@ extern struct irq_chip i8259A_chip;
 struct legacy_pic {
 	int nr_legacy_irqs;
 	struct irq_chip *chip;
+	void (*mask)(unsigned int irq);
+	void (*unmask)(unsigned int irq);
 	void (*mask_all)(void);
 	void (*restore_mask)(void);
 	void (*init)(int auto_eoi);
...
...@@ -170,12 +170,6 @@ extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); ...@@ -170,12 +170,6 @@ extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern void probe_nr_irqs_gsi(void); extern void probe_nr_irqs_gsi(void);
extern int setup_ioapic_entry(int apic, int irq,
struct IO_APIC_route_entry *entry,
unsigned int destination, int trigger,
int polarity, int vector, int pin);
extern void ioapic_write_entry(int apic, int pin,
struct IO_APIC_route_entry e);
extern void setup_ioapic_ids_from_mpc(void); extern void setup_ioapic_ids_from_mpc(void);
struct mp_ioapic_gsi{ struct mp_ioapic_gsi{
......
@@ -24,10 +24,18 @@ static inline void prepare_irte(struct irte *irte, int vector,
 	irte->dest_id = IRTE_DEST(dest);
 	irte->redir_hint = 1;
 }
+
+static inline bool irq_remapped(struct irq_cfg *cfg)
+{
+	return cfg->irq_2_iommu.iommu != NULL;
+}
 #else
 static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
 {
 }
+
+static inline bool irq_remapped(struct irq_cfg *cfg)
+{
+	return false;
+}
 #endif
 
 #endif /* _ASM_X86_IRQ_REMAPPING_H */
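
The hunk above replaces the old lookup-based irq_remapped(irq) with a direct test on the irq_cfg that now embeds irq_2_iommu. A hedged usage sketch, with an invented caller name; the get_irq_chip_data() lookup mirrors what the converted x86 code further down does:

    /* Sketch only: the cfg is the interrupt's chip data, and the remapping
     * test becomes a plain pointer check instead of a table lookup. */
    static bool demo_irq_uses_remapping(unsigned int irq)
    {
    	struct irq_cfg *cfg = get_irq_chip_data(irq);

    	return cfg && irq_remapped(cfg);
    }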
...@@ -231,34 +231,6 @@ static void apbt_restart_clocksource(struct clocksource *cs) ...@@ -231,34 +231,6 @@ static void apbt_restart_clocksource(struct clocksource *cs)
apbt_start_counter(phy_cs_timer_id); apbt_start_counter(phy_cs_timer_id);
} }
/* Setup IRQ routing via IOAPIC */
#ifdef CONFIG_SMP
static void apbt_setup_irq(struct apbt_dev *adev)
{
struct irq_chip *chip;
struct irq_desc *desc;
/* timer0 irq has been setup early */
if (adev->irq == 0)
return;
desc = irq_to_desc(adev->irq);
chip = get_irq_chip(adev->irq);
disable_irq(adev->irq);
desc->status |= IRQ_MOVE_PCNTXT;
irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
/* APB timer irqs are set up as mp_irqs, timer is edge triggerred */
set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge");
enable_irq(adev->irq);
if (system_state == SYSTEM_BOOTING)
if (request_irq(adev->irq, apbt_interrupt_handler,
IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
adev->name, adev)) {
printk(KERN_ERR "Failed request IRQ for APBT%d\n",
adev->num);
}
}
#endif
static void apbt_enable_int(int n) static void apbt_enable_int(int n)
{ {
unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
...@@ -334,6 +306,27 @@ static int __init apbt_clockevent_register(void) ...@@ -334,6 +306,27 @@ static int __init apbt_clockevent_register(void)
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static void apbt_setup_irq(struct apbt_dev *adev)
{
/* timer0 irq has been setup early */
if (adev->irq == 0)
return;
if (system_state == SYSTEM_BOOTING) {
irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
/* APB timer irqs are set up as mp_irqs, timer is edge type */
__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
if (request_irq(adev->irq, apbt_interrupt_handler,
IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
adev->name, adev)) {
printk(KERN_ERR "Failed request IRQ for APBT%d\n",
adev->num);
}
} else
enable_irq(adev->irq);
}
/* Should be called with per cpu */ /* Should be called with per cpu */
void apbt_setup_secondary_clock(void) void apbt_setup_secondary_clock(void)
{ {
...@@ -389,10 +382,11 @@ static int apbt_cpuhp_notify(struct notifier_block *n, ...@@ -389,10 +382,11 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
switch (action & 0xf) { switch (action & 0xf) {
case CPU_DEAD: case CPU_DEAD:
disable_irq(adev->irq);
apbt_disable_int(cpu); apbt_disable_int(cpu);
if (system_state == SYSTEM_RUNNING) if (system_state == SYSTEM_RUNNING) {
pr_debug("skipping APBT CPU %lu offline\n", cpu); pr_debug("skipping APBT CPU %lu offline\n", cpu);
else if (adev) { } else if (adev) {
pr_debug("APBT clockevent for cpu %lu offline\n", cpu); pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
free_irq(adev->irq, adev); free_irq(adev->irq, adev);
} }
......
@@ -52,6 +52,7 @@
 #include <asm/mce.h>
 #include <asm/kvm_para.h>
 #include <asm/tsc.h>
+#include <asm/atomic.h>
 
 unsigned int num_processors;
@@ -370,38 +371,87 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
 }
 
 /*
- * Setup extended LVT, AMD specific (K8, family 10h)
+ * Setup extended LVT, AMD specific
  *
- * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
- * MCE interrupts are supported. Thus MCE offset must be set to 0.
+ * Software should use the LVT offsets the BIOS provides. The offsets
+ * are determined by the subsystems using it like those for MCE
+ * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts
+ * are supported. Beginning with family 10h at least 4 offsets are
+ * available.
  *
- * If mask=1, the LVT entry does not generate interrupts while mask=0
- * enables the vector. See also the BKDGs.
+ * Since the offsets must be consistent for all cores, we keep track
+ * of the LVT offsets in software and reserve the offset for the same
+ * vector also to be used on other cores. An offset is freed by
+ * setting the entry to APIC_EILVT_MASKED.
+ *
+ * If the BIOS is right, there should be no conflicts. Otherwise a
+ * "[Firmware Bug]: ..." error message is generated. However, if
+ * software does not properly determines the offsets, it is not
+ * necessarily a BIOS bug.
  */
-#define APIC_EILVT_LVTOFF_MCE 0
-#define APIC_EILVT_LVTOFF_IBS 1
+static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];
 
-static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
+static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
 {
-	unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0);
-	unsigned int v = (mask << 16) | (msg_type << 8) | vector;
-
-	apic_write(reg, v);
+	return (old & APIC_EILVT_MASKED)
+		|| (new == APIC_EILVT_MASKED)
+		|| ((new & ~APIC_EILVT_MASKED) == old);
 }
 
-u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
+static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
 {
-	setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
-	return APIC_EILVT_LVTOFF_MCE;
+	unsigned int rsvd;			/* 0: uninitialized */
+
+	if (offset >= APIC_EILVT_NR_MAX)
+		return ~0;
+
+	rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED;
+	do {
+		if (rsvd &&
+		    !eilvt_entry_is_changeable(rsvd, new))
+			/* may not change if vectors are different */
+			return rsvd;
+		rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
+	} while (rsvd != new);
+
+	return new;
 }
 
-u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
+/*
+ * If mask=1, the LVT entry does not generate interrupts while mask=0
+ * enables the vector. See also the BKDGs.
+ */
+int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
 {
-	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
-	return APIC_EILVT_LVTOFF_IBS;
+	unsigned long reg = APIC_EILVTn(offset);
+	unsigned int new, old, reserved;
+
+	new = (mask << 16) | (msg_type << 8) | vector;
+	old = apic_read(reg);
+	reserved = reserve_eilvt_offset(offset, new);
+
+	if (reserved != new) {
+		pr_err(FW_BUG "cpu %d, try to setup vector 0x%x, but "
+		       "vector 0x%x was already reserved by another core, "
+		       "APIC%lX=0x%x\n",
+		       smp_processor_id(), new, reserved, reg, old);
+		return -EINVAL;
+	}
+
+	if (!eilvt_entry_is_changeable(old, new)) {
+		pr_err(FW_BUG "cpu %d, try to setup vector 0x%x but "
+		       "register already in use, APIC%lX=0x%x\n",
+		       smp_processor_id(), new, reg, old);
+		return -EBUSY;
+	}
+
+	apic_write(reg, new);
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
+EXPORT_SYMBOL_GPL(setup_APIC_eilvt);
 
 /*
  * Program the next event, relative to now
...
@@ -178,7 +178,7 @@ int __init check_nmi_watchdog(void)
 error:
 	if (nmi_watchdog == NMI_IO_APIC) {
 		if (!timer_through_8259)
-			legacy_pic->chip->mask(0);
+			legacy_pic->mask(0);
 		on_each_cpu(__acpi_nmi_disable, NULL, 1);
 	}
...
...@@ -131,7 +131,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) ...@@ -131,7 +131,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
u32 low = 0, high = 0, address = 0; u32 low = 0, high = 0, address = 0;
unsigned int bank, block; unsigned int bank, block;
struct thresh_restart tr; struct thresh_restart tr;
u8 lvt_off; int lvt_off = -1;
u8 offset;
for (bank = 0; bank < NR_BANKS; ++bank) { for (bank = 0; bank < NR_BANKS; ++bank) {
for (block = 0; block < NR_BLOCKS; ++block) { for (block = 0; block < NR_BLOCKS; ++block) {
...@@ -162,8 +163,28 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) ...@@ -162,8 +163,28 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
if (shared_bank[bank] && c->cpu_core_id) if (shared_bank[bank] && c->cpu_core_id)
break; break;
#endif #endif
lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR, offset = (high & MASK_LVTOFF_HI) >> 20;
APIC_EILVT_MSG_FIX, 0); if (lvt_off < 0) {
if (setup_APIC_eilvt(offset,
THRESHOLD_APIC_VECTOR,
APIC_EILVT_MSG_FIX, 0)) {
pr_err(FW_BUG "cpu %d, failed to "
"setup threshold interrupt "
"for bank %d, block %d "
"(MSR%08X=0x%x%08x)",
smp_processor_id(), bank, block,
address, high, low);
continue;
}
lvt_off = offset;
} else if (lvt_off != offset) {
pr_err(FW_BUG "cpu %d, invalid threshold "
"interrupt offset %d for bank %d,"
"block %d (MSR%08X=0x%x%08x)",
smp_processor_id(), lvt_off, bank,
block, address, high, low);
continue;
}
high &= ~MASK_LVTOFF_HI; high &= ~MASK_LVTOFF_HI;
high |= lvt_off << 20; high |= lvt_off << 20;
......
...@@ -440,9 +440,9 @@ static int hpet_legacy_next_event(unsigned long delta, ...@@ -440,9 +440,9 @@ static int hpet_legacy_next_event(unsigned long delta,
static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
static struct hpet_dev *hpet_devs; static struct hpet_dev *hpet_devs;
void hpet_msi_unmask(unsigned int irq) void hpet_msi_unmask(struct irq_data *data)
{ {
struct hpet_dev *hdev = get_irq_data(irq); struct hpet_dev *hdev = data->handler_data;
unsigned int cfg; unsigned int cfg;
/* unmask it */ /* unmask it */
...@@ -451,10 +451,10 @@ void hpet_msi_unmask(unsigned int irq) ...@@ -451,10 +451,10 @@ void hpet_msi_unmask(unsigned int irq)
hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
} }
void hpet_msi_mask(unsigned int irq) void hpet_msi_mask(struct irq_data *data)
{ {
struct hpet_dev *hdev = data->handler_data;
unsigned int cfg; unsigned int cfg;
struct hpet_dev *hdev = get_irq_data(irq);
/* mask it */ /* mask it */
cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
...@@ -462,18 +462,14 @@ void hpet_msi_mask(unsigned int irq) ...@@ -462,18 +462,14 @@ void hpet_msi_mask(unsigned int irq)
hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
} }
void hpet_msi_write(unsigned int irq, struct msi_msg *msg) void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg)
{ {
struct hpet_dev *hdev = get_irq_data(irq);
hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
} }
void hpet_msi_read(unsigned int irq, struct msi_msg *msg) void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg)
{ {
struct hpet_dev *hdev = get_irq_data(irq);
msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
msg->address_hi = 0; msg->address_hi = 0;
......
...@@ -29,24 +29,10 @@ ...@@ -29,24 +29,10 @@
* plus some generic x86 specific things if generic specifics makes * plus some generic x86 specific things if generic specifics makes
* any sense at all. * any sense at all.
*/ */
static void init_8259A(int auto_eoi);
static int i8259A_auto_eoi; static int i8259A_auto_eoi;
DEFINE_RAW_SPINLOCK(i8259A_lock); DEFINE_RAW_SPINLOCK(i8259A_lock);
static void mask_and_ack_8259A(unsigned int);
static void mask_8259A(void);
static void unmask_8259A(void);
static void disable_8259A_irq(unsigned int irq);
static void enable_8259A_irq(unsigned int irq);
static void init_8259A(int auto_eoi);
static int i8259A_irq_pending(unsigned int irq);
struct irq_chip i8259A_chip = {
.name = "XT-PIC",
.mask = disable_8259A_irq,
.disable = disable_8259A_irq,
.unmask = enable_8259A_irq,
.mask_ack = mask_and_ack_8259A,
};
/* /*
* 8259A PIC functions to handle ISA devices: * 8259A PIC functions to handle ISA devices:
...@@ -68,7 +54,7 @@ unsigned int cached_irq_mask = 0xffff; ...@@ -68,7 +54,7 @@ unsigned int cached_irq_mask = 0xffff;
*/ */
unsigned long io_apic_irqs; unsigned long io_apic_irqs;
static void disable_8259A_irq(unsigned int irq) static void mask_8259A_irq(unsigned int irq)
{ {
unsigned int mask = 1 << irq; unsigned int mask = 1 << irq;
unsigned long flags; unsigned long flags;
...@@ -82,7 +68,12 @@ static void disable_8259A_irq(unsigned int irq) ...@@ -82,7 +68,12 @@ static void disable_8259A_irq(unsigned int irq)
raw_spin_unlock_irqrestore(&i8259A_lock, flags); raw_spin_unlock_irqrestore(&i8259A_lock, flags);
} }
static void enable_8259A_irq(unsigned int irq) static void disable_8259A_irq(struct irq_data *data)
{
mask_8259A_irq(data->irq);
}
static void unmask_8259A_irq(unsigned int irq)
{ {
unsigned int mask = ~(1 << irq); unsigned int mask = ~(1 << irq);
unsigned long flags; unsigned long flags;
...@@ -96,6 +87,11 @@ static void enable_8259A_irq(unsigned int irq) ...@@ -96,6 +87,11 @@ static void enable_8259A_irq(unsigned int irq)
raw_spin_unlock_irqrestore(&i8259A_lock, flags); raw_spin_unlock_irqrestore(&i8259A_lock, flags);
} }
static void enable_8259A_irq(struct irq_data *data)
{
unmask_8259A_irq(data->irq);
}
static int i8259A_irq_pending(unsigned int irq) static int i8259A_irq_pending(unsigned int irq)
{ {
unsigned int mask = 1<<irq; unsigned int mask = 1<<irq;
...@@ -117,7 +113,7 @@ static void make_8259A_irq(unsigned int irq) ...@@ -117,7 +113,7 @@ static void make_8259A_irq(unsigned int irq)
disable_irq_nosync(irq); disable_irq_nosync(irq);
io_apic_irqs &= ~(1<<irq); io_apic_irqs &= ~(1<<irq);
set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
"XT"); i8259A_chip.name);
enable_irq(irq); enable_irq(irq);
} }
...@@ -150,8 +146,9 @@ static inline int i8259A_irq_real(unsigned int irq) ...@@ -150,8 +146,9 @@ static inline int i8259A_irq_real(unsigned int irq)
* first, _then_ send the EOI, and the order of EOI * first, _then_ send the EOI, and the order of EOI
* to the two 8259s is important! * to the two 8259s is important!
*/ */
static void mask_and_ack_8259A(unsigned int irq) static void mask_and_ack_8259A(struct irq_data *data)
{ {
unsigned int irq = data->irq;
unsigned int irqmask = 1 << irq; unsigned int irqmask = 1 << irq;
unsigned long flags; unsigned long flags;
...@@ -223,6 +220,14 @@ static void mask_and_ack_8259A(unsigned int irq) ...@@ -223,6 +220,14 @@ static void mask_and_ack_8259A(unsigned int irq)
} }
} }
struct irq_chip i8259A_chip = {
.name = "XT-PIC",
.irq_mask = disable_8259A_irq,
.irq_disable = disable_8259A_irq,
.irq_unmask = enable_8259A_irq,
.irq_mask_ack = mask_and_ack_8259A,
};
static char irq_trigger[2]; static char irq_trigger[2];
/** /**
* ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
...@@ -342,9 +347,9 @@ static void init_8259A(int auto_eoi) ...@@ -342,9 +347,9 @@ static void init_8259A(int auto_eoi)
* In AEOI mode we just have to mask the interrupt * In AEOI mode we just have to mask the interrupt
* when acking. * when acking.
*/ */
i8259A_chip.mask_ack = disable_8259A_irq; i8259A_chip.irq_mask_ack = disable_8259A_irq;
else else
i8259A_chip.mask_ack = mask_and_ack_8259A; i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
udelay(100); /* wait for 8259A to initialize */ udelay(100); /* wait for 8259A to initialize */
...@@ -363,14 +368,6 @@ static void init_8259A(int auto_eoi) ...@@ -363,14 +368,6 @@ static void init_8259A(int auto_eoi)
static void legacy_pic_noop(void) { }; static void legacy_pic_noop(void) { };
static void legacy_pic_uint_noop(unsigned int unused) { }; static void legacy_pic_uint_noop(unsigned int unused) { };
static void legacy_pic_int_noop(int unused) { }; static void legacy_pic_int_noop(int unused) { };
static struct irq_chip dummy_pic_chip = {
.name = "dummy pic",
.mask = legacy_pic_uint_noop,
.unmask = legacy_pic_uint_noop,
.disable = legacy_pic_uint_noop,
.mask_ack = legacy_pic_uint_noop,
};
static int legacy_pic_irq_pending_noop(unsigned int irq) static int legacy_pic_irq_pending_noop(unsigned int irq)
{ {
return 0; return 0;
...@@ -378,7 +375,9 @@ static int legacy_pic_irq_pending_noop(unsigned int irq) ...@@ -378,7 +375,9 @@ static int legacy_pic_irq_pending_noop(unsigned int irq)
struct legacy_pic null_legacy_pic = { struct legacy_pic null_legacy_pic = {
.nr_legacy_irqs = 0, .nr_legacy_irqs = 0,
.chip = &dummy_pic_chip, .chip = &dummy_irq_chip,
.mask = legacy_pic_uint_noop,
.unmask = legacy_pic_uint_noop,
.mask_all = legacy_pic_noop, .mask_all = legacy_pic_noop,
.restore_mask = legacy_pic_noop, .restore_mask = legacy_pic_noop,
.init = legacy_pic_int_noop, .init = legacy_pic_int_noop,
...@@ -389,7 +388,9 @@ struct legacy_pic null_legacy_pic = { ...@@ -389,7 +388,9 @@ struct legacy_pic null_legacy_pic = {
struct legacy_pic default_legacy_pic = { struct legacy_pic default_legacy_pic = {
.nr_legacy_irqs = NR_IRQS_LEGACY, .nr_legacy_irqs = NR_IRQS_LEGACY,
.chip = &i8259A_chip, .chip = &i8259A_chip,
.mask_all = mask_8259A, .mask = mask_8259A_irq,
.unmask = unmask_8259A_irq,
.mask_all = mask_8259A,
.restore_mask = unmask_8259A, .restore_mask = unmask_8259A,
.init = init_8259A, .init = init_8259A,
.irq_pending = i8259A_irq_pending, .irq_pending = i8259A_irq_pending,
......
...@@ -159,7 +159,7 @@ int show_interrupts(struct seq_file *p, void *v) ...@@ -159,7 +159,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%*d: ", prec, i); seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j) for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
seq_printf(p, " %8s", desc->chip->name); seq_printf(p, " %8s", desc->irq_data.chip->name);
seq_printf(p, "-%-8s", desc->name); seq_printf(p, "-%-8s", desc->name);
if (action) { if (action) {
...@@ -282,6 +282,7 @@ void fixup_irqs(void) ...@@ -282,6 +282,7 @@ void fixup_irqs(void)
unsigned int irq, vector; unsigned int irq, vector;
static int warned; static int warned;
struct irq_desc *desc; struct irq_desc *desc;
struct irq_data *data;
for_each_irq_desc(irq, desc) { for_each_irq_desc(irq, desc) {
int break_affinity = 0; int break_affinity = 0;
...@@ -296,7 +297,8 @@ void fixup_irqs(void) ...@@ -296,7 +297,8 @@ void fixup_irqs(void)
/* interrupt's are disabled at this point */ /* interrupt's are disabled at this point */
raw_spin_lock(&desc->lock); raw_spin_lock(&desc->lock);
affinity = desc->affinity; data = &desc->irq_data;
affinity = data->affinity;
if (!irq_has_action(irq) || if (!irq_has_action(irq) ||
cpumask_equal(affinity, cpu_online_mask)) { cpumask_equal(affinity, cpu_online_mask)) {
raw_spin_unlock(&desc->lock); raw_spin_unlock(&desc->lock);
...@@ -315,16 +317,16 @@ void fixup_irqs(void) ...@@ -315,16 +317,16 @@ void fixup_irqs(void)
affinity = cpu_all_mask; affinity = cpu_all_mask;
} }
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask) if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask)
desc->chip->mask(irq); data->chip->irq_mask(data);
if (desc->chip->set_affinity) if (data->chip->irq_set_affinity)
desc->chip->set_affinity(irq, affinity); data->chip->irq_set_affinity(data, affinity, true);
else if (!(warned++)) else if (!(warned++))
set_affinity = 0; set_affinity = 0;
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask)
desc->chip->unmask(irq); data->chip->irq_unmask(data);
raw_spin_unlock(&desc->lock); raw_spin_unlock(&desc->lock);
...@@ -355,10 +357,10 @@ void fixup_irqs(void) ...@@ -355,10 +357,10 @@ void fixup_irqs(void)
if (irr & (1 << (vector % 32))) { if (irr & (1 << (vector % 32))) {
irq = __get_cpu_var(vector_irq)[vector]; irq = __get_cpu_var(vector_irq)[vector];
desc = irq_to_desc(irq); data = irq_get_irq_data(irq);
raw_spin_lock(&desc->lock); raw_spin_lock(&desc->lock);
if (desc->chip->retrigger) if (data->chip->irq_retrigger)
desc->chip->retrigger(irq); data->chip->irq_retrigger(data);
raw_spin_unlock(&desc->lock); raw_spin_unlock(&desc->lock);
} }
} }
......
...@@ -100,6 +100,8 @@ int vector_used_by_percpu_irq(unsigned int vector) ...@@ -100,6 +100,8 @@ int vector_used_by_percpu_irq(unsigned int vector)
void __init init_ISA_irqs(void) void __init init_ISA_irqs(void)
{ {
struct irq_chip *chip = legacy_pic->chip;
const char *name = chip->name;
int i; int i;
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
...@@ -107,19 +109,8 @@ void __init init_ISA_irqs(void) ...@@ -107,19 +109,8 @@ void __init init_ISA_irqs(void)
#endif #endif
legacy_pic->init(0); legacy_pic->init(0);
/* for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
* 16 old-style INTA-cycle interrupts: set_irq_chip_and_handler_name(i, chip, handle_level_irq, name);
*/
for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) {
struct irq_desc *desc = irq_to_desc(i);
desc->status = IRQ_DISABLED;
desc->action = NULL;
desc->depth = 1;
set_irq_chip_and_handler_name(i, &i8259A_chip,
handle_level_irq, "XT");
}
} }
void __init init_IRQ(void) void __init init_IRQ(void)
......
...@@ -323,9 +323,9 @@ notrace static void __cpuinit start_secondary(void *unused) ...@@ -323,9 +323,9 @@ notrace static void __cpuinit start_secondary(void *unused)
check_tsc_sync_target(); check_tsc_sync_target();
if (nmi_watchdog == NMI_IO_APIC) { if (nmi_watchdog == NMI_IO_APIC) {
legacy_pic->chip->mask(0); legacy_pic->mask(0);
enable_NMI_through_LVT0(); enable_NMI_through_LVT0();
legacy_pic->chip->unmask(0); legacy_pic->unmask(0);
} }
/* This must be done before setting cpu_online_mask */ /* This must be done before setting cpu_online_mask */
......
...@@ -28,34 +28,21 @@ struct uv_irq_2_mmr_pnode{ ...@@ -28,34 +28,21 @@ struct uv_irq_2_mmr_pnode{
static spinlock_t uv_irq_lock; static spinlock_t uv_irq_lock;
static struct rb_root uv_irq_root; static struct rb_root uv_irq_root;
static int uv_set_irq_affinity(unsigned int, const struct cpumask *); static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);
static void uv_noop(unsigned int irq) static void uv_noop(struct irq_data *data) { }
{
}
static unsigned int uv_noop_ret(unsigned int irq)
{
return 0;
}
static void uv_ack_apic(unsigned int irq) static void uv_ack_apic(struct irq_data *data)
{ {
ack_APIC_irq(); ack_APIC_irq();
} }
static struct irq_chip uv_irq_chip = { static struct irq_chip uv_irq_chip = {
.name = "UV-CORE", .name = "UV-CORE",
.startup = uv_noop_ret, .irq_mask = uv_noop,
.shutdown = uv_noop, .irq_unmask = uv_noop,
.enable = uv_noop, .irq_eoi = uv_ack_apic,
.disable = uv_noop, .irq_set_affinity = uv_set_irq_affinity,
.ack = uv_noop,
.mask = uv_noop,
.unmask = uv_noop,
.eoi = uv_ack_apic,
.end = uv_noop,
.set_affinity = uv_set_irq_affinity,
}; };
/* /*
...@@ -144,26 +131,22 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, ...@@ -144,26 +131,22 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
unsigned long mmr_offset, int limit) unsigned long mmr_offset, int limit)
{ {
const struct cpumask *eligible_cpu = cpumask_of(cpu); const struct cpumask *eligible_cpu = cpumask_of(cpu);
struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg = get_irq_chip_data(irq);
struct irq_cfg *cfg;
int mmr_pnode;
unsigned long mmr_value; unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry; struct uv_IO_APIC_route_entry *entry;
int err; int mmr_pnode, err;
BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
sizeof(unsigned long)); sizeof(unsigned long));
cfg = irq_cfg(irq);
err = assign_irq_vector(irq, cfg, eligible_cpu); err = assign_irq_vector(irq, cfg, eligible_cpu);
if (err != 0) if (err != 0)
return err; return err;
if (limit == UV_AFFINITY_CPU) if (limit == UV_AFFINITY_CPU)
desc->status |= IRQ_NO_BALANCING; irq_set_status_flags(irq, IRQ_NO_BALANCING);
else else
desc->status |= IRQ_MOVE_PCNTXT; irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
irq_name); irq_name);
...@@ -206,17 +189,17 @@ static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset) ...@@ -206,17 +189,17 @@ static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
} }
static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{ {
struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg = data->chip_data;
struct irq_cfg *cfg = desc->chip_data;
unsigned int dest; unsigned int dest;
unsigned long mmr_value; unsigned long mmr_value, mmr_offset;
struct uv_IO_APIC_route_entry *entry; struct uv_IO_APIC_route_entry *entry;
unsigned long mmr_offset;
int mmr_pnode; int mmr_pnode;
if (set_desc_affinity(desc, mask, &dest)) if (__ioapic_set_affinity(data, mask, &dest))
return -1; return -1;
mmr_value = 0; mmr_value = 0;
...@@ -231,7 +214,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) ...@@ -231,7 +214,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
entry->dest = dest; entry->dest = dest;
/* Get previously stored MMR and pnode of hub sourcing interrupts */ /* Get previously stored MMR and pnode of hub sourcing interrupts */
if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode)) if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
return -1; return -1;
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
......
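The uv_set_irq_affinity() conversion above also shows the new shape of the affinity callback: it receives the irq_data plus a force flag and returns an int. A sketch of the signature only, with a hypothetical name and no real hardware programming:

	static int my_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
	{
		/* reprogram routing for data->irq here; return 0 on success */
		return 0;
	}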
...@@ -66,10 +66,7 @@ static void __init visws_time_init(void) ...@@ -66,10 +66,7 @@ static void __init visws_time_init(void)
} }
/* Replaces the default init_ISA_irqs in the generic setup */ /* Replaces the default init_ISA_irqs in the generic setup */
static void __init visws_pre_intr_init(void) static void __init visws_pre_intr_init(void);
{
init_VISWS_APIC_irqs();
}
/* Quirk for machine specific memory setup. */ /* Quirk for machine specific memory setup. */
...@@ -429,67 +426,34 @@ static int is_co_apic(unsigned int irq) ...@@ -429,67 +426,34 @@ static int is_co_apic(unsigned int irq)
/* /*
* This is the SGI Cobalt (IO-)APIC: * This is the SGI Cobalt (IO-)APIC:
*/ */
static void enable_cobalt_irq(struct irq_data *data)
static void enable_cobalt_irq(unsigned int irq)
{ {
co_apic_set(is_co_apic(irq), irq); co_apic_set(is_co_apic(data->irq), data->irq);
} }
static void disable_cobalt_irq(unsigned int irq) static void disable_cobalt_irq(struct irq_data *data)
{ {
int entry = is_co_apic(irq); int entry = is_co_apic(data->irq);
co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK); co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK);
co_apic_read(CO_APIC_LO(entry)); co_apic_read(CO_APIC_LO(entry));
} }
/* static void ack_cobalt_irq(struct irq_data *data)
* "irq" really just serves to identify the device. Here is where we
* map this to the Cobalt APIC entry where it's physically wired.
* This is called via request_irq -> setup_irq -> irq_desc->startup()
*/
static unsigned int startup_cobalt_irq(unsigned int irq)
{ {
unsigned long flags; unsigned long flags;
struct irq_desc *desc = irq_to_desc(irq);
spin_lock_irqsave(&cobalt_lock, flags); spin_lock_irqsave(&cobalt_lock, flags);
if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING))) disable_cobalt_irq(data);
desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
enable_cobalt_irq(irq);
spin_unlock_irqrestore(&cobalt_lock, flags);
return 0;
}
static void ack_cobalt_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&cobalt_lock, flags);
disable_cobalt_irq(irq);
apic_write(APIC_EOI, APIC_EIO_ACK); apic_write(APIC_EOI, APIC_EIO_ACK);
spin_unlock_irqrestore(&cobalt_lock, flags); spin_unlock_irqrestore(&cobalt_lock, flags);
} }
static void end_cobalt_irq(unsigned int irq)
{
unsigned long flags;
struct irq_desc *desc = irq_to_desc(irq);
spin_lock_irqsave(&cobalt_lock, flags);
if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS)))
enable_cobalt_irq(irq);
spin_unlock_irqrestore(&cobalt_lock, flags);
}
static struct irq_chip cobalt_irq_type = { static struct irq_chip cobalt_irq_type = {
.name = "Cobalt-APIC", .name = "Cobalt-APIC",
.startup = startup_cobalt_irq, .irq_enable = enable_cobalt_irq,
.shutdown = disable_cobalt_irq, .irq_disable = disable_cobalt_irq,
.enable = enable_cobalt_irq, .irq_ack = ack_cobalt_irq,
.disable = disable_cobalt_irq,
.ack = ack_cobalt_irq,
.end = end_cobalt_irq,
}; };
...@@ -503,35 +467,34 @@ static struct irq_chip cobalt_irq_type = { ...@@ -503,35 +467,34 @@ static struct irq_chip cobalt_irq_type = {
* interrupt controller type, and through a special virtual interrupt- * interrupt controller type, and through a special virtual interrupt-
* controller. Device drivers only see the virtual interrupt sources. * controller. Device drivers only see the virtual interrupt sources.
*/ */
static unsigned int startup_piix4_master_irq(unsigned int irq) static unsigned int startup_piix4_master_irq(struct irq_data *data)
{ {
legacy_pic->init(0); legacy_pic->init(0);
enable_cobalt_irq(data);
return startup_cobalt_irq(irq);
} }
static void end_piix4_master_irq(unsigned int irq) static void end_piix4_master_irq(struct irq_data *data)
{ {
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&cobalt_lock, flags); spin_lock_irqsave(&cobalt_lock, flags);
enable_cobalt_irq(irq); enable_cobalt_irq(data);
spin_unlock_irqrestore(&cobalt_lock, flags); spin_unlock_irqrestore(&cobalt_lock, flags);
} }
static struct irq_chip piix4_master_irq_type = { static struct irq_chip piix4_master_irq_type = {
.name = "PIIX4-master", .name = "PIIX4-master",
.startup = startup_piix4_master_irq, .irq_startup = startup_piix4_master_irq,
.ack = ack_cobalt_irq, .irq_ack = ack_cobalt_irq,
.end = end_piix4_master_irq,
}; };
static void pii4_mask(struct irq_data *data) { }
static struct irq_chip piix4_virtual_irq_type = { static struct irq_chip piix4_virtual_irq_type = {
.name = "PIIX4-virtual", .name = "PIIX4-virtual",
.mask = pii4_mask,
}; };
/* /*
* PIIX4-8259 master/virtual functions to handle interrupt requests * PIIX4-8259 master/virtual functions to handle interrupt requests
* from legacy devices: floppy, parallel, serial, rtc. * from legacy devices: floppy, parallel, serial, rtc.
...@@ -549,9 +512,8 @@ static struct irq_chip piix4_virtual_irq_type = { ...@@ -549,9 +512,8 @@ static struct irq_chip piix4_virtual_irq_type = {
*/ */
static irqreturn_t piix4_master_intr(int irq, void *dev_id) static irqreturn_t piix4_master_intr(int irq, void *dev_id)
{ {
int realirq;
struct irq_desc *desc;
unsigned long flags; unsigned long flags;
int realirq;
raw_spin_lock_irqsave(&i8259A_lock, flags); raw_spin_lock_irqsave(&i8259A_lock, flags);
...@@ -592,18 +554,10 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id) ...@@ -592,18 +554,10 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
raw_spin_unlock_irqrestore(&i8259A_lock, flags); raw_spin_unlock_irqrestore(&i8259A_lock, flags);
desc = irq_to_desc(realirq);
/* /*
* handle this 'virtual interrupt' as a Cobalt one now. * handle this 'virtual interrupt' as a Cobalt one now.
*/ */
kstat_incr_irqs_this_cpu(realirq, desc); generic_handle_irq(realirq);
if (likely(desc->action != NULL))
handle_IRQ_event(realirq, desc->action);
if (!(desc->status & IRQ_DISABLED))
legacy_pic->chip->unmask(realirq);
return IRQ_HANDLED; return IRQ_HANDLED;
...@@ -624,41 +578,35 @@ static struct irqaction cascade_action = { ...@@ -624,41 +578,35 @@ static struct irqaction cascade_action = {
static inline void set_piix4_virtual_irq_type(void) static inline void set_piix4_virtual_irq_type(void)
{ {
piix4_virtual_irq_type.shutdown = i8259A_chip.mask;
piix4_virtual_irq_type.enable = i8259A_chip.unmask; piix4_virtual_irq_type.enable = i8259A_chip.unmask;
piix4_virtual_irq_type.disable = i8259A_chip.mask; piix4_virtual_irq_type.disable = i8259A_chip.mask;
piix4_virtual_irq_type.unmask = i8259A_chip.unmask;
} }
void init_VISWS_APIC_irqs(void) static void __init visws_pre_intr_init(void)
{ {
int i; int i;
for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { set_piix4_virtual_irq_type();
struct irq_desc *desc = irq_to_desc(i);
desc->status = IRQ_DISABLED;
desc->action = 0;
desc->depth = 1;
if (i == 0) { for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) {
desc->chip = &cobalt_irq_type; struct irq_chip *chip = NULL;
}
else if (i == CO_IRQ_IDE0) { if (i == 0)
desc->chip = &cobalt_irq_type; chip = &cobalt_irq_type;
} else if (i == CO_IRQ_IDE0)
else if (i == CO_IRQ_IDE1) { chip = &cobalt_irq_type;
desc->chip = &cobalt_irq_type; else if (i == CO_IRQ_IDE1)
} chip = &cobalt_irq_type;
} chip = &cobalt_irq_type;
else if (i == CO_IRQ_8259) { else if (i == CO_IRQ_8259)
desc->chip = &piix4_master_irq_type; chip = &piix4_master_irq_type;
} else if (i < CO_IRQ_APIC0)
else if (i < CO_IRQ_APIC0) { chip = &piix4_virtual_irq_type;
set_piix4_virtual_irq_type(); else if (IS_CO_APIC(i))
desc->chip = &piix4_virtual_irq_type; chip = &cobalt_irq_type;
}
else if (IS_CO_APIC(i)) { if (chip)
desc->chip = &cobalt_irq_type; set_irq_chip(i, chip);
}
} }
setup_irq(CO_IRQ_8259, &master_action); setup_irq(CO_IRQ_8259, &master_action);
......
...@@ -791,22 +791,22 @@ static void lguest_flush_tlb_kernel(void) ...@@ -791,22 +791,22 @@ static void lguest_flush_tlb_kernel(void)
* simple as setting a bit. We don't actually "ack" interrupts as such, we * simple as setting a bit. We don't actually "ack" interrupts as such, we
* just mask and unmask them. I wonder if we should be cleverer? * just mask and unmask them. I wonder if we should be cleverer?
*/ */
static void disable_lguest_irq(unsigned int irq) static void disable_lguest_irq(struct irq_data *data)
{ {
set_bit(irq, lguest_data.blocked_interrupts); set_bit(data->irq, lguest_data.blocked_interrupts);
} }
static void enable_lguest_irq(unsigned int irq) static void enable_lguest_irq(struct irq_data *data)
{ {
clear_bit(irq, lguest_data.blocked_interrupts); clear_bit(data->irq, lguest_data.blocked_interrupts);
} }
/* This structure describes the lguest IRQ controller. */ /* This structure describes the lguest IRQ controller. */
static struct irq_chip lguest_irq_controller = { static struct irq_chip lguest_irq_controller = {
.name = "lguest", .name = "lguest",
.mask = disable_lguest_irq, .irq_mask = disable_lguest_irq,
.mask_ack = disable_lguest_irq, .irq_mask_ack = disable_lguest_irq,
.unmask = enable_lguest_irq, .irq_unmask = enable_lguest_irq,
}; };
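The lguest controller above is about as small as an irq_data-based chip gets. The same pattern in isolation, as a sketch with hypothetical my_* names and a fixed-size bitmap standing in for real hardware state:

	static DECLARE_BITMAP(my_masked, 64);	/* hypothetical per-controller state */

	static void my_irq_mask(struct irq_data *data)
	{
		set_bit(data->irq, my_masked);
	}

	static void my_irq_unmask(struct irq_data *data)
	{
		clear_bit(data->irq, my_masked);
	}

	static struct irq_chip my_irq_chip = {
		.name		= "my-chip",
		.irq_mask	= my_irq_mask,
		.irq_mask_ack	= my_irq_mask,
		.irq_unmask	= my_irq_unmask,
	};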
/* /*
...@@ -838,12 +838,12 @@ static void __init lguest_init_IRQ(void) ...@@ -838,12 +838,12 @@ static void __init lguest_init_IRQ(void)
* rather than set them in lguest_init_IRQ we are called here every time an * rather than set them in lguest_init_IRQ we are called here every time an
* lguest device needs an interrupt. * lguest device needs an interrupt.
* *
* FIXME: irq_to_desc_alloc_node() can fail due to lack of memory, we should * FIXME: irq_alloc_desc_at() can fail due to lack of memory, we should
* pass that up! * pass that up!
*/ */
void lguest_setup_irq(unsigned int irq) void lguest_setup_irq(unsigned int irq)
{ {
irq_to_desc_alloc_node(irq, 0); irq_alloc_desc_at(irq, 0);
set_irq_chip_and_handler_name(irq, &lguest_irq_controller, set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
handle_level_irq, "level"); handle_level_irq, "level");
} }
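On the FIXME above: irq_alloc_desc_at() returns the allocated irq number on success and a negative errno on failure, so the error could be propagated roughly as follows. This is only a sketch with a hypothetical function name, not what the driver does today:

	static int lguest_setup_irq_checked(unsigned int irq)	/* hypothetical */
	{
		int err = irq_alloc_desc_at(irq, 0);

		if (err < 0)
			return err;	/* e.g. -ENOMEM */

		set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
					      handle_level_irq, "level");
		return 0;
	}

A caller that may set up the same irq twice would likely need to tolerate -EEXIST as well, since the old irq_to_desc_alloc_node() path simply handed back the existing descriptor.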
......
...@@ -64,15 +64,22 @@ static u64 ibs_op_ctl; ...@@ -64,15 +64,22 @@ static u64 ibs_op_ctl;
* IBS cpuid feature detection * IBS cpuid feature detection
*/ */
#define IBS_CPUID_FEATURES 0x8000001b #define IBS_CPUID_FEATURES 0x8000001b
/* /*
* Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
* bit 0 is used to indicate the existence of IBS. * bit 0 is used to indicate the existence of IBS.
*/ */
#define IBS_CAPS_AVAIL (1LL<<0) #define IBS_CAPS_AVAIL (1U<<0)
#define IBS_CAPS_RDWROPCNT (1LL<<3) #define IBS_CAPS_RDWROPCNT (1U<<3)
#define IBS_CAPS_OPCNT (1LL<<4) #define IBS_CAPS_OPCNT (1U<<4)
/*
* IBS APIC setup
*/
#define IBSCTL 0x1cc
#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK 0x0F
/* /*
* IBS randomization macros * IBS randomization macros
...@@ -266,6 +273,74 @@ static void op_amd_stop_ibs(void) ...@@ -266,6 +273,74 @@ static void op_amd_stop_ibs(void)
wrmsrl(MSR_AMD64_IBSOPCTL, 0); wrmsrl(MSR_AMD64_IBSOPCTL, 0);
} }
static inline int eilvt_is_available(int offset)
{
/* check if we may assign a vector */
return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}
static inline int ibs_eilvt_valid(void)
{
u64 val;
int offset;
rdmsrl(MSR_AMD64_IBSCTL, val);
if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
pr_err(FW_BUG "cpu %d, invalid IBS "
"interrupt offset %d (MSR%08X=0x%016llx)",
smp_processor_id(), offset,
MSR_AMD64_IBSCTL, val);
return 0;
}
offset = val & IBSCTL_LVT_OFFSET_MASK;
if (eilvt_is_available(offset))
return !0;
pr_err(FW_BUG "cpu %d, IBS interrupt offset %d "
"not available (MSR%08X=0x%016llx)",
smp_processor_id(), offset,
MSR_AMD64_IBSCTL, val);
return 0;
}
static inline int get_ibs_offset(void)
{
u64 val;
rdmsrl(MSR_AMD64_IBSCTL, val);
if (!(val & IBSCTL_LVT_OFFSET_VALID))
return -EINVAL;
return val & IBSCTL_LVT_OFFSET_MASK;
}
static void setup_APIC_ibs(void)
{
int offset;
offset = get_ibs_offset();
if (offset < 0)
goto failed;
if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
return;
failed:
pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n",
smp_processor_id());
}
static void clear_APIC_ibs(void)
{
int offset;
offset = get_ibs_offset();
if (offset >= 0)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
...@@ -376,13 +451,13 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, ...@@ -376,13 +451,13 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
} }
if (ibs_caps) if (ibs_caps)
setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0); setup_APIC_ibs();
} }
static void op_amd_cpu_shutdown(void) static void op_amd_cpu_shutdown(void)
{ {
if (ibs_caps) if (ibs_caps)
setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); clear_APIC_ibs();
} }
static int op_amd_check_ctrs(struct pt_regs * const regs, static int op_amd_check_ctrs(struct pt_regs * const regs,
...@@ -445,16 +520,11 @@ static void op_amd_stop(struct op_msrs const * const msrs) ...@@ -445,16 +520,11 @@ static void op_amd_stop(struct op_msrs const * const msrs)
op_amd_stop_ibs(); op_amd_stop_ibs();
} }
static int __init_ibs_nmi(void) static int setup_ibs_ctl(int ibs_eilvt_off)
{ {
#define IBSCTL_LVTOFFSETVAL (1 << 8)
#define IBSCTL 0x1cc
struct pci_dev *cpu_cfg; struct pci_dev *cpu_cfg;
int nodes; int nodes;
u32 value = 0; u32 value = 0;
u8 ibs_eilvt_off;
ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
nodes = 0; nodes = 0;
cpu_cfg = NULL; cpu_cfg = NULL;
...@@ -466,21 +536,60 @@ static int __init_ibs_nmi(void) ...@@ -466,21 +536,60 @@ static int __init_ibs_nmi(void)
break; break;
++nodes; ++nodes;
pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
| IBSCTL_LVTOFFSETVAL); | IBSCTL_LVT_OFFSET_VALID);
pci_read_config_dword(cpu_cfg, IBSCTL, &value); pci_read_config_dword(cpu_cfg, IBSCTL, &value);
if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
pci_dev_put(cpu_cfg); pci_dev_put(cpu_cfg);
printk(KERN_DEBUG "Failed to setup IBS LVT offset, " printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
"IBSCTL = 0x%08x", value); "IBSCTL = 0x%08x\n", value);
return 1; return -EINVAL;
} }
} while (1); } while (1);
if (!nodes) { if (!nodes) {
printk(KERN_DEBUG "No CPU node configured for IBS"); printk(KERN_DEBUG "No CPU node configured for IBS\n");
return 1; return -ENODEV;
}
return 0;
}
static int force_ibs_eilvt_setup(void)
{
int i;
int ret;
/* find the next free available EILVT entry */
for (i = 1; i < 4; i++) {
if (!eilvt_is_available(i))
continue;
ret = setup_ibs_ctl(i);
if (ret)
return ret;
return 0;
} }
printk(KERN_DEBUG "No EILVT entry available\n");
return -EBUSY;
}
static int __init_ibs_nmi(void)
{
int ret;
if (ibs_eilvt_valid())
return 0;
ret = force_ibs_eilvt_setup();
if (ret)
return ret;
if (!ibs_eilvt_valid())
return -EFAULT;
pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
return 0; return 0;
} }
......
...@@ -92,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v) ...@@ -92,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j) for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif #endif
seq_printf(p, " %14s", irq_desc[i].chip->typename); seq_printf(p, " %14s", irq_desc[i].chip->name);
seq_printf(p, " %s", action->name); seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next) for (action=action->next; action; action = action->next)
......
...@@ -141,9 +141,9 @@ typedef struct irq_data_isa { ...@@ -141,9 +141,9 @@ typedef struct irq_data_isa {
__u8 rcvhdr[8]; __u8 rcvhdr[8];
} irq_data_isa; } irq_data_isa;
typedef union irq_data { typedef union act2000_irq_data {
irq_data_isa isa; irq_data_isa isa;
} irq_data; } act2000_irq_data;
/* /*
* Per card driver data * Per card driver data
...@@ -176,7 +176,7 @@ typedef struct act2000_card { ...@@ -176,7 +176,7 @@ typedef struct act2000_card {
char *status_buf_read; char *status_buf_read;
char *status_buf_write; char *status_buf_write;
char *status_buf_end; char *status_buf_end;
irq_data idat; /* Data used for IRQ handler */ act2000_irq_data idat; /* Data used for IRQ handler */
isdn_if interface; /* Interface to upper layer */ isdn_if interface; /* Interface to upper layer */
char regname[35]; /* Name used for request_region */ char regname[35]; /* Name used for request_region */
} act2000_card; } act2000_card;
......
...@@ -801,6 +801,16 @@ static void closecard(int cardnr) ...@@ -801,6 +801,16 @@ static void closecard(int cardnr)
ll_unload(csta); ll_unload(csta);
} }
static irqreturn_t card_irq(int intno, void *dev_id)
{
struct IsdnCardState *cs = dev_id;
irqreturn_t ret = cs->irq_func(intno, cs);
if (ret == IRQ_HANDLED)
cs->irq_cnt++;
return ret;
}
static int init_card(struct IsdnCardState *cs) static int init_card(struct IsdnCardState *cs)
{ {
int irq_cnt, cnt = 3, ret; int irq_cnt, cnt = 3, ret;
...@@ -809,10 +819,10 @@ static int init_card(struct IsdnCardState *cs) ...@@ -809,10 +819,10 @@ static int init_card(struct IsdnCardState *cs)
ret = cs->cardmsg(cs, CARD_INIT, NULL); ret = cs->cardmsg(cs, CARD_INIT, NULL);
return(ret); return(ret);
} }
irq_cnt = kstat_irqs(cs->irq); irq_cnt = cs->irq_cnt = 0;
printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ], printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ],
cs->irq, irq_cnt); cs->irq, irq_cnt);
if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) { if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) {
printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n", printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n",
cs->irq); cs->irq);
return 1; return 1;
...@@ -822,8 +832,8 @@ static int init_card(struct IsdnCardState *cs) ...@@ -822,8 +832,8 @@ static int init_card(struct IsdnCardState *cs)
/* Timeout 10ms */ /* Timeout 10ms */
msleep(10); msleep(10);
printk(KERN_INFO "%s: IRQ %d count %d\n", printk(KERN_INFO "%s: IRQ %d count %d\n",
CardType[cs->typ], cs->irq, kstat_irqs(cs->irq)); CardType[cs->typ], cs->irq, cs->irq_cnt);
if (kstat_irqs(cs->irq) == irq_cnt) { if (cs->irq_cnt == irq_cnt) {
printk(KERN_WARNING printk(KERN_WARNING
"%s: IRQ(%d) getting no interrupts during init %d\n", "%s: IRQ(%d) getting no interrupts during init %d\n",
CardType[cs->typ], cs->irq, 4 - cnt); CardType[cs->typ], cs->irq, 4 - cnt);
......
...@@ -959,6 +959,7 @@ struct IsdnCardState { ...@@ -959,6 +959,7 @@ struct IsdnCardState {
u_long event; u_long event;
struct work_struct tqueue; struct work_struct tqueue;
struct timer_list dbusytimer; struct timer_list dbusytimer;
unsigned int irq_cnt;
#ifdef ERROR_STATISTIC #ifdef ERROR_STATISTIC
int err_crc; int err_crc;
int err_tx; int err_tx;
......
...@@ -78,7 +78,7 @@ struct sih { ...@@ -78,7 +78,7 @@ struct sih {
u8 irq_lines; /* number of supported irq lines */ u8 irq_lines; /* number of supported irq lines */
/* SIR ignored -- set interrupt, for testing only */ /* SIR ignored -- set interrupt, for testing only */
struct irq_data { struct sih_irq_data {
u8 isr_offset; u8 isr_offset;
u8 imr_offset; u8 imr_offset;
} mask[2]; } mask[2];
...@@ -810,7 +810,7 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) ...@@ -810,7 +810,7 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
twl4030_irq_chip = dummy_irq_chip; twl4030_irq_chip = dummy_irq_chip;
twl4030_irq_chip.name = "twl4030"; twl4030_irq_chip.name = "twl4030";
twl4030_sih_irq_chip.ack = dummy_irq_chip.ack; twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
for (i = irq_base; i < irq_end; i++) { for (i = irq_base; i < irq_end; i++) {
set_irq_chip_and_handler(i, &twl4030_irq_chip, set_irq_chip_and_handler(i, &twl4030_irq_chip,
......
...@@ -1221,9 +1221,9 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) ...@@ -1221,9 +1221,9 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
} }
} }
void dmar_msi_unmask(unsigned int irq) void dmar_msi_unmask(struct irq_data *data)
{ {
struct intel_iommu *iommu = get_irq_data(irq); struct intel_iommu *iommu = irq_data_get_irq_data(data);
unsigned long flag; unsigned long flag;
/* unmask it */ /* unmask it */
...@@ -1234,10 +1234,10 @@ void dmar_msi_unmask(unsigned int irq) ...@@ -1234,10 +1234,10 @@ void dmar_msi_unmask(unsigned int irq)
spin_unlock_irqrestore(&iommu->register_lock, flag); spin_unlock_irqrestore(&iommu->register_lock, flag);
} }
void dmar_msi_mask(unsigned int irq) void dmar_msi_mask(struct irq_data *data)
{ {
unsigned long flag; unsigned long flag;
struct intel_iommu *iommu = get_irq_data(irq); struct intel_iommu *iommu = irq_data_get_irq_data(data);
/* mask it */ /* mask it */
spin_lock_irqsave(&iommu->register_lock, flag); spin_lock_irqsave(&iommu->register_lock, flag);
......
...@@ -57,28 +57,22 @@ void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) ...@@ -57,28 +57,22 @@ void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
*msg = cfg->msg; *msg = cfg->msg;
} }
void mask_ht_irq(unsigned int irq) void mask_ht_irq(struct irq_data *data)
{ {
struct ht_irq_cfg *cfg; struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
struct ht_irq_msg msg; struct ht_irq_msg msg = cfg->msg;
cfg = get_irq_data(irq);
msg = cfg->msg;
msg.address_lo |= 1; msg.address_lo |= 1;
write_ht_irq_msg(irq, &msg); write_ht_irq_msg(data->irq, &msg);
} }
void unmask_ht_irq(unsigned int irq) void unmask_ht_irq(struct irq_data *data)
{ {
struct ht_irq_cfg *cfg; struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
struct ht_irq_msg msg; struct ht_irq_msg msg = cfg->msg;
cfg = get_irq_data(irq);
msg = cfg->msg;
msg.address_lo &= ~1; msg.address_lo &= ~1;
write_ht_irq_msg(irq, &msg); write_ht_irq_msg(data->irq, &msg);
} }
/** /**
......
...@@ -46,109 +46,24 @@ static __init int setup_intremap(char *str) ...@@ -46,109 +46,24 @@ static __init int setup_intremap(char *str)
} }
early_param("intremap", setup_intremap); early_param("intremap", setup_intremap);
struct irq_2_iommu {
struct intel_iommu *iommu;
u16 irte_index;
u16 sub_handle;
u8 irte_mask;
};
#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
struct irq_2_iommu *iommu;
iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);
return iommu;
}
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
if (WARN_ON_ONCE(!desc))
return NULL;
return desc->irq_2_iommu;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
struct irq_desc *desc;
struct irq_2_iommu *irq_iommu;
desc = irq_to_desc(irq);
if (!desc) {
printk(KERN_INFO "can not get irq_desc for %d\n", irq);
return NULL;
}
irq_iommu = desc->irq_2_iommu;
if (!irq_iommu)
desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
return desc->irq_2_iommu;
}
#else /* !CONFIG_SPARSE_IRQ */
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
if (irq < nr_irqs)
return &irq_2_iommuX[irq];
return NULL;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
return irq_2_iommu(irq);
}
#endif
static DEFINE_SPINLOCK(irq_2_ir_lock); static DEFINE_SPINLOCK(irq_2_ir_lock);
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
struct irq_2_iommu *irq_iommu;
irq_iommu = irq_2_iommu(irq);
if (!irq_iommu)
return NULL;
if (!irq_iommu->iommu)
return NULL;
return irq_iommu;
}
int irq_remapped(int irq)
{ {
return valid_irq_2_iommu(irq) != NULL; struct irq_cfg *cfg = get_irq_chip_data(irq);
return cfg ? &cfg->irq_2_iommu : NULL;
} }
int get_irte(int irq, struct irte *entry) int get_irte(int irq, struct irte *entry)
{ {
int index; struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
struct irq_2_iommu *irq_iommu;
unsigned long flags; unsigned long flags;
int index;
if (!entry) if (!entry || !irq_iommu)
return -1; return -1;
spin_lock_irqsave(&irq_2_ir_lock, flags); spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu = valid_irq_2_iommu(irq);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
}
index = irq_iommu->irte_index + irq_iommu->sub_handle; index = irq_iommu->irte_index + irq_iommu->sub_handle;
*entry = *(irq_iommu->iommu->ir_table->base + index); *entry = *(irq_iommu->iommu->ir_table->base + index);
...@@ -160,20 +75,14 @@ int get_irte(int irq, struct irte *entry) ...@@ -160,20 +75,14 @@ int get_irte(int irq, struct irte *entry)
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{ {
struct ir_table *table = iommu->ir_table; struct ir_table *table = iommu->ir_table;
struct irq_2_iommu *irq_iommu; struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
u16 index, start_index; u16 index, start_index;
unsigned int mask = 0; unsigned int mask = 0;
unsigned long flags; unsigned long flags;
int i; int i;
if (!count) if (!count || !irq_iommu)
return -1;
#ifndef CONFIG_SPARSE_IRQ
/* protect irq_2_iommu_alloc later */
if (irq >= nr_irqs)
return -1; return -1;
#endif
/* /*
* start the IRTE search from index 0. * start the IRTE search from index 0.
...@@ -214,13 +123,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) ...@@ -214,13 +123,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
for (i = index; i < index + count; i++) for (i = index; i < index + count; i++)
table->base[i].present = 1; table->base[i].present = 1;
irq_iommu = irq_2_iommu_alloc(irq);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
printk(KERN_ERR "can't allocate irq_2_iommu\n");
return -1;
}
irq_iommu->iommu = iommu; irq_iommu->iommu = iommu;
irq_iommu->irte_index = index; irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0; irq_iommu->sub_handle = 0;
...@@ -244,17 +146,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) ...@@ -244,17 +146,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
int map_irq_to_irte_handle(int irq, u16 *sub_handle) int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{ {
int index; struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
struct irq_2_iommu *irq_iommu;
unsigned long flags; unsigned long flags;
int index;
spin_lock_irqsave(&irq_2_ir_lock, flags); if (!irq_iommu)
irq_iommu = valid_irq_2_iommu(irq);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1; return -1;
}
spin_lock_irqsave(&irq_2_ir_lock, flags);
*sub_handle = irq_iommu->sub_handle; *sub_handle = irq_iommu->sub_handle;
index = irq_iommu->irte_index; index = irq_iommu->irte_index;
spin_unlock_irqrestore(&irq_2_ir_lock, flags); spin_unlock_irqrestore(&irq_2_ir_lock, flags);
...@@ -263,18 +162,13 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle) ...@@ -263,18 +162,13 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{ {
struct irq_2_iommu *irq_iommu; struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&irq_2_ir_lock, flags); if (!irq_iommu)
irq_iommu = irq_2_iommu_alloc(irq);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
printk(KERN_ERR "can't allocate irq_2_iommu\n");
return -1; return -1;
}
spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu->iommu = iommu; irq_iommu->iommu = iommu;
irq_iommu->irte_index = index; irq_iommu->irte_index = index;
...@@ -286,43 +180,18 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) ...@@ -286,43 +180,18 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
return 0; return 0;
} }
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
struct irq_2_iommu *irq_iommu;
unsigned long flags;
spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu = valid_irq_2_iommu(irq);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
}
irq_iommu->iommu = NULL;
irq_iommu->irte_index = 0;
irq_iommu->sub_handle = 0;
irq_2_iommu(irq)->irte_mask = 0;
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return 0;
}
int modify_irte(int irq, struct irte *irte_modified) int modify_irte(int irq, struct irte *irte_modified)
{ {
int rc; struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
int index;
struct irte *irte;
struct intel_iommu *iommu; struct intel_iommu *iommu;
struct irq_2_iommu *irq_iommu;
unsigned long flags; unsigned long flags;
struct irte *irte;
int rc, index;
spin_lock_irqsave(&irq_2_ir_lock, flags); if (!irq_iommu)
irq_iommu = valid_irq_2_iommu(irq);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1; return -1;
}
spin_lock_irqsave(&irq_2_ir_lock, flags);
iommu = irq_iommu->iommu; iommu = irq_iommu->iommu;
...@@ -339,31 +208,6 @@ int modify_irte(int irq, struct irte *irte_modified) ...@@ -339,31 +208,6 @@ int modify_irte(int irq, struct irte *irte_modified)
return rc; return rc;
} }
int flush_irte(int irq)
{
int rc;
int index;
struct intel_iommu *iommu;
struct irq_2_iommu *irq_iommu;
unsigned long flags;
spin_lock_irqsave(&irq_2_ir_lock, flags);
irq_iommu = valid_irq_2_iommu(irq);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
}
iommu = irq_iommu->iommu;
index = irq_iommu->irte_index + irq_iommu->sub_handle;
rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return rc;
}
struct intel_iommu *map_hpet_to_ir(u8 hpet_id) struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{ {
int i; int i;
...@@ -420,16 +264,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu) ...@@ -420,16 +264,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
int free_irte(int irq) int free_irte(int irq)
{ {
int rc = 0; struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
struct irq_2_iommu *irq_iommu;
unsigned long flags; unsigned long flags;
int rc;
spin_lock_irqsave(&irq_2_ir_lock, flags); if (!irq_iommu)
irq_iommu = valid_irq_2_iommu(irq);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1; return -1;
}
spin_lock_irqsave(&irq_2_ir_lock, flags);
rc = clear_entries(irq_iommu); rc = clear_entries(irq_iommu);
......
...@@ -170,33 +170,31 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag) ...@@ -170,33 +170,31 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
desc->masked = __msix_mask_irq(desc, flag); desc->masked = __msix_mask_irq(desc, flag);
} }
static void msi_set_mask_bit(unsigned irq, u32 flag) static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{ {
struct msi_desc *desc = get_irq_msi(irq); struct msi_desc *desc = irq_data_get_msi(data);
if (desc->msi_attrib.is_msix) { if (desc->msi_attrib.is_msix) {
msix_mask_irq(desc, flag); msix_mask_irq(desc, flag);
readl(desc->mask_base); /* Flush write to device */ readl(desc->mask_base); /* Flush write to device */
} else { } else {
unsigned offset = irq - desc->dev->irq; unsigned offset = data->irq - desc->dev->irq;
msi_mask_irq(desc, 1 << offset, flag << offset); msi_mask_irq(desc, 1 << offset, flag << offset);
} }
} }
void mask_msi_irq(unsigned int irq) void mask_msi_irq(struct irq_data *data)
{ {
msi_set_mask_bit(irq, 1); msi_set_mask_bit(data, 1);
} }
void unmask_msi_irq(unsigned int irq) void unmask_msi_irq(struct irq_data *data)
{ {
msi_set_mask_bit(irq, 0); msi_set_mask_bit(data, 0);
} }
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{ {
struct msi_desc *entry = get_irq_desc_msi(desc);
BUG_ON(entry->dev->current_state != PCI_D0); BUG_ON(entry->dev->current_state != PCI_D0);
if (entry->msi_attrib.is_msix) { if (entry->msi_attrib.is_msix) {
...@@ -227,15 +225,13 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) ...@@ -227,15 +225,13 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
void read_msi_msg(unsigned int irq, struct msi_msg *msg) void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{ {
struct irq_desc *desc = irq_to_desc(irq); struct msi_desc *entry = get_irq_msi(irq);
read_msi_msg_desc(desc, msg); __read_msi_msg(entry, msg);
} }
void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{ {
struct msi_desc *entry = get_irq_desc_msi(desc);
/* Assert that the cache is valid, assuming that /* Assert that the cache is valid, assuming that
* valid messages are not all-zeroes. */ * valid messages are not all-zeroes. */
BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
...@@ -246,15 +242,13 @@ void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) ...@@ -246,15 +242,13 @@ void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{ {
struct irq_desc *desc = irq_to_desc(irq); struct msi_desc *entry = get_irq_msi(irq);
get_cached_msi_msg_desc(desc, msg); __get_cached_msi_msg(entry, msg);
} }
void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{ {
struct msi_desc *entry = get_irq_desc_msi(desc);
if (entry->dev->current_state != PCI_D0) { if (entry->dev->current_state != PCI_D0) {
/* Don't touch the hardware now */ /* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) { } else if (entry->msi_attrib.is_msix) {
...@@ -292,9 +286,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) ...@@ -292,9 +286,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
void write_msi_msg(unsigned int irq, struct msi_msg *msg) void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{ {
struct irq_desc *desc = irq_to_desc(irq); struct msi_desc *entry = get_irq_msi(irq);
write_msi_msg_desc(desc, msg); __write_msi_msg(entry, msg);
} }
static void free_msi_irqs(struct pci_dev *dev) static void free_msi_irqs(struct pci_dev *dev)
......
...@@ -338,30 +338,29 @@ static void unmask_evtchn(int port) ...@@ -338,30 +338,29 @@ static void unmask_evtchn(int port)
static int find_unbound_irq(void) static int find_unbound_irq(void)
{ {
int irq; struct irq_data *data;
struct irq_desc *desc; int irq, res;
for (irq = 0; irq < nr_irqs; irq++) { for (irq = 0; irq < nr_irqs; irq++) {
desc = irq_to_desc(irq); data = irq_get_irq_data(irq);
/* only 0->15 have init'd desc; handle irq > 16 */ /* only 0->15 have init'd desc; handle irq > 16 */
if (desc == NULL) if (!data)
break; break;
if (desc->chip == &no_irq_chip) if (data->chip == &no_irq_chip)
break; break;
if (desc->chip != &xen_dynamic_chip) if (data->chip != &xen_dynamic_chip)
continue; continue;
if (irq_info[irq].type == IRQT_UNBOUND) if (irq_info[irq].type == IRQT_UNBOUND)
break; return irq;
} }
if (irq == nr_irqs) if (irq == nr_irqs)
panic("No available IRQ to bind to: increase nr_irqs!\n"); panic("No available IRQ to bind to: increase nr_irqs!\n");
desc = irq_to_desc_alloc_node(irq, 0); res = irq_alloc_desc_at(irq, 0);
if (WARN_ON(desc == NULL))
return -1;
dynamic_irq_init_keep_chip_data(irq); if (WARN_ON(res != irq))
return -1;
return irq; return irq;
} }
...@@ -495,7 +494,7 @@ static void unbind_from_irq(unsigned int irq) ...@@ -495,7 +494,7 @@ static void unbind_from_irq(unsigned int irq)
if (irq_info[irq].type != IRQT_UNBOUND) { if (irq_info[irq].type != IRQT_UNBOUND) {
irq_info[irq] = mk_unbound_info(); irq_info[irq] = mk_unbound_info();
dynamic_irq_cleanup(irq); irq_free_desc(irq);
} }
spin_unlock(&irq_mapping_update_lock); spin_unlock(&irq_mapping_update_lock);
......
...@@ -106,6 +106,7 @@ struct irte { ...@@ -106,6 +106,7 @@ struct irte {
__u64 high; __u64 high;
}; };
}; };
#ifdef CONFIG_INTR_REMAP #ifdef CONFIG_INTR_REMAP
extern int intr_remapping_enabled; extern int intr_remapping_enabled;
extern int intr_remapping_supported(void); extern int intr_remapping_supported(void);
...@@ -119,11 +120,8 @@ extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); ...@@ -119,11 +120,8 @@ extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
u16 sub_handle); u16 sub_handle);
extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); extern int map_irq_to_irte_handle(int irq, u16 *sub_handle);
extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index);
extern int flush_irte(int irq);
extern int free_irte(int irq); extern int free_irte(int irq);
extern int irq_remapped(int irq);
extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
extern struct intel_iommu *map_ioapic_to_ir(int apic); extern struct intel_iommu *map_ioapic_to_ir(int apic);
extern struct intel_iommu *map_hpet_to_ir(u8 id); extern struct intel_iommu *map_hpet_to_ir(u8 id);
...@@ -177,7 +175,6 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) ...@@ -177,7 +175,6 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
return 0; return 0;
} }
#define irq_remapped(irq) (0)
#define enable_intr_remapping(mode) (-1) #define enable_intr_remapping(mode) (-1)
#define disable_intr_remapping() (0) #define disable_intr_remapping() (0)
#define reenable_intr_remapping(mode) (0) #define reenable_intr_remapping(mode) (0)
...@@ -187,8 +184,9 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) ...@@ -187,8 +184,9 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
/* Can't use the common MSI interrupt functions /* Can't use the common MSI interrupt functions
* since DMAR is not a pci device * since DMAR is not a pci device
*/ */
extern void dmar_msi_unmask(unsigned int irq); struct irq_data;
extern void dmar_msi_mask(unsigned int irq); extern void dmar_msi_unmask(struct irq_data *data);
extern void dmar_msi_mask(struct irq_data *data);
extern void dmar_msi_read(int irq, struct msi_msg *msg); extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg); extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu); extern int dmar_set_interrupt(struct intel_iommu *iommu);
......
...@@ -9,8 +9,9 @@ struct ht_irq_msg { ...@@ -9,8 +9,9 @@ struct ht_irq_msg {
/* Helper functions.. */ /* Helper functions.. */
void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
void mask_ht_irq(unsigned int irq); struct irq_data;
void unmask_ht_irq(unsigned int irq); void mask_ht_irq(struct irq_data *data);
void unmask_ht_irq(struct irq_data *data);
/* The arch hook for getting things started */ /* The arch hook for getting things started */
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev); int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
......
...@@ -647,11 +647,8 @@ static inline void init_irq_proc(void) ...@@ -647,11 +647,8 @@ static inline void init_irq_proc(void)
struct seq_file; struct seq_file;
int show_interrupts(struct seq_file *p, void *v); int show_interrupts(struct seq_file *p, void *v);
struct irq_desc;
extern int early_irq_init(void); extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void); extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void); extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int node);
#endif #endif
This diff is collapsed.
#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H
/*
* Core internal functions to deal with irq descriptors
*
* This include will move to kernel/irq once we cleaned up the tree.
* For now it's included from <linux/irq.h>
*/
struct proc_dir_entry;
struct timer_rand_state;
/**
* struct irq_desc - interrupt descriptor
* @irq_data: per irq and chip data passed down to chip functions
* @timer_rand_state: pointer to timer rand state struct
* @kstat_irqs: irq stats per cpu
* @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
* @action: the irq action chain
* @status: status information
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple set_irq_wake() callers
* @irq_count: stats field to detect stalled irqs
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
* @pending_mask: pending rebalanced interrupts
* @threads_active: number of irqaction threads currently running
* @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
* @dir: /proc/irq/ procfs entry
* @name: flow handler name for /proc/interrupts output
*/
struct irq_desc {
#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
struct irq_data irq_data;
#else
/*
* This union will go away, once we fixed the direct access to
* irq_desc all over the place. The direct fields are a 1:1
* overlay of irq_data.
*/
union {
struct irq_data irq_data;
struct {
unsigned int irq;
unsigned int node;
struct irq_chip *chip;
void *handler_data;
void *chip_data;
struct msi_desc *msi_desc;
#ifdef CONFIG_SMP
cpumask_var_t affinity;
#endif
};
};
#endif
struct timer_rand_state *timer_rand_state;
unsigned int *kstat_irqs;
irq_flow_handler_t handle_irq;
struct irqaction *action; /* IRQ action list */
unsigned int status; /* IRQ status */
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
raw_spinlock_t lock;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_var_t pending_mask;
#endif
#endif
atomic_t threads_active;
wait_queue_head_t wait_for_threads;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dir;
#endif
const char *name;
} ____cacheline_internodealigned_in_smp;
#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
#endif
/* Will be removed once the last users in power and sh are gone */
extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
{
return desc;
}
#ifdef CONFIG_GENERIC_HARDIRQS
#define get_irq_desc_chip(desc) ((desc)->irq_data.chip)
#define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data)
#define get_irq_desc_data(desc) ((desc)->irq_data.handler_data)
#define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc)
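These accessors let code reach the chip and its data without touching the direct desc fields that the union comment above says will go away. A small sketch of the intended usage (the function is hypothetical):

	static void my_print_chip(unsigned int irq)	/* hypothetical */
	{
		struct irq_desc *desc = irq_to_desc(irq);

		if (desc && get_irq_desc_chip(desc))
			printk(KERN_DEBUG "irq %u: chip %s, chip_data %p\n",
			       irq, get_irq_desc_chip(desc)->name,
			       get_irq_desc_chip_data(desc));
	}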
/*
* Monolithic do_IRQ implementation.
*/
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
extern unsigned int __do_IRQ(unsigned int irq);
#endif
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt. If the descriptor is attached to an
* irqchip-style controller then we call the ->handle_irq() handler,
* and it calls __do_IRQ() if it's attached to an irqtype-style controller.
*/
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
desc->handle_irq(irq, desc);
#else
if (likely(desc->handle_irq))
desc->handle_irq(irq, desc);
else
__do_IRQ(irq);
#endif
}
static inline void generic_handle_irq(unsigned int irq)
{
generic_handle_irq_desc(irq, irq_to_desc(irq));
}
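As the piix4_master_intr() hunk earlier does with generic_handle_irq(realirq), a cascaded demux handler typically reads a pending mask from its hardware and hands each set bit to the generic layer. A sketch, where my_read_pending() and my_irq_base are hypothetical:

	static irqreturn_t my_demux_handler(int irq, void *dev_id)
	{
		unsigned long pending = my_read_pending(dev_id);	/* hypothetical register read */
		int bit;

		for_each_set_bit(bit, &pending, BITS_PER_LONG)
			generic_handle_irq(my_irq_base + bit);		/* hypothetical irq base */

		return pending ? IRQ_HANDLED : IRQ_NONE;
	}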
/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
return desc->action != NULL;
}
static inline int irq_balancing_disabled(unsigned int irq)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
return desc->status & IRQ_NO_BALANCING_MASK;
}
/* caller has locked the irq_desc and both params are valid */
static inline void __set_irq_handler_unlocked(int irq,
irq_flow_handler_t handler)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
desc->handle_irq = handler;
}
#endif
#endif
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
extern int nr_irqs; extern int nr_irqs;
extern struct irq_desc *irq_to_desc(unsigned int irq); extern struct irq_desc *irq_to_desc(unsigned int irq);
unsigned int irq_get_next_irq(unsigned int offset);
# define for_each_irq_desc(irq, desc) \ # define for_each_irq_desc(irq, desc) \
for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
...@@ -47,6 +48,10 @@ extern struct irq_desc *irq_to_desc(unsigned int irq); ...@@ -47,6 +48,10 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
#define irq_node(irq) 0 #define irq_node(irq) 0
#endif #endif
# define for_each_active_irq(irq) \
for (irq = irq_get_next_irq(0); irq < nr_irqs; \
irq = irq_get_next_irq(irq + 1))
#endif /* CONFIG_GENERIC_HARDIRQS */ #endif /* CONFIG_GENERIC_HARDIRQS */
#define for_each_irq_nr(irq) \ #define for_each_irq_nr(irq) \
......
...@@ -435,14 +435,6 @@ do { \ ...@@ -435,14 +435,6 @@ do { \
#endif /* CONFIG_LOCKDEP */ #endif /* CONFIG_LOCKDEP */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void); extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void); extern void early_boot_irqs_on(void);
......
...@@ -10,12 +10,13 @@ struct msi_msg { ...@@ -10,12 +10,13 @@ struct msi_msg {
}; };
/* Helper functions */ /* Helper functions */
struct irq_desc; struct irq_data;
extern void mask_msi_irq(unsigned int irq); struct msi_desc;
extern void unmask_msi_irq(unsigned int irq); extern void mask_msi_irq(struct irq_data *data);
extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); extern void unmask_msi_irq(struct irq_data *data);
extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); extern void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); extern void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
extern void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
......
...@@ -339,6 +339,8 @@ config AUDIT_TREE ...@@ -339,6 +339,8 @@ config AUDIT_TREE
depends on AUDITSYSCALL depends on AUDITSYSCALL
select FSNOTIFY select FSNOTIFY
source "kernel/irq/Kconfig"
menu "RCU Subsystem" menu "RCU Subsystem"
choice choice
......
...@@ -556,7 +556,6 @@ asmlinkage void __init start_kernel(void) ...@@ -556,7 +556,6 @@ asmlinkage void __init start_kernel(void)
local_irq_disable(); local_irq_disable();
early_boot_irqs_off(); early_boot_irqs_off();
early_init_irq_lock_class();
/* /*
* Interrupts are still disabled. Do necessary setups, then * Interrupts are still disabled. Do necessary setups, then
......
config HAVE_GENERIC_HARDIRQS
def_bool n
if HAVE_GENERIC_HARDIRQS
menu "IRQ subsystem"
#
# Interrupt subsystem related configuration options
#
config GENERIC_HARDIRQS
def_bool y
config GENERIC_HARDIRQS_NO__DO_IRQ
def_bool y
# Select this to disable the deprecated stuff
config GENERIC_HARDIRQS_NO_DEPRECATED
def_bool n
# Options selectable by the architecture code
config HAVE_SPARSE_IRQ
def_bool n
config GENERIC_IRQ_PROBE
def_bool n
config GENERIC_PENDING_IRQ
def_bool n
config AUTO_IRQ_AFFINITY
def_bool n
config IRQ_PER_CPU
def_bool n
config HARDIRQS_SW_RESEND
def_bool n
config SPARSE_IRQ
bool "Support sparse irq numbering"
depends on HAVE_SPARSE_IRQ
---help---
Sparse irq numbering is useful for distro kernels that want
to define a high CONFIG_NR_CPUS value but still want to have
low kernel memory footprint on smaller machines.
( Sparse irqs can also be beneficial on NUMA boxes, as they spread
out the interrupt descriptors in a more NUMA-friendly way. )
If you don't know what to do here, say N.
endmenu
endif
obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o
obj-$(CONFIG_PM_SLEEP) += pm.o obj-$(CONFIG_PM_SLEEP) += pm.o
...@@ -57,9 +57,10 @@ unsigned long probe_irq_on(void) ...@@ -57,9 +57,10 @@ unsigned long probe_irq_on(void)
* Some chips need to know about probing in * Some chips need to know about probing in
* progress: * progress:
*/ */
if (desc->chip->set_type) if (desc->irq_data.chip->irq_set_type)
desc->chip->set_type(i, IRQ_TYPE_PROBE); desc->irq_data.chip->irq_set_type(&desc->irq_data,
desc->chip->startup(i); IRQ_TYPE_PROBE);
desc->irq_data.chip->irq_startup(&desc->irq_data);
} }
raw_spin_unlock_irq(&desc->lock); raw_spin_unlock_irq(&desc->lock);
} }
...@@ -76,7 +77,7 @@ unsigned long probe_irq_on(void) ...@@ -76,7 +77,7 @@ unsigned long probe_irq_on(void)
raw_spin_lock_irq(&desc->lock); raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) { if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING; desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
if (desc->chip->startup(i)) if (desc->irq_data.chip->irq_startup(&desc->irq_data))
desc->status |= IRQ_PENDING; desc->status |= IRQ_PENDING;
} }
raw_spin_unlock_irq(&desc->lock); raw_spin_unlock_irq(&desc->lock);
...@@ -98,7 +99,7 @@ unsigned long probe_irq_on(void) ...@@ -98,7 +99,7 @@ unsigned long probe_irq_on(void)
/* It triggered already - consider it spurious. */ /* It triggered already - consider it spurious. */
if (!(status & IRQ_WAITING)) { if (!(status & IRQ_WAITING)) {
desc->status = status & ~IRQ_AUTODETECT; desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i); desc->irq_data.chip->irq_shutdown(&desc->irq_data);
} else } else
if (i < 32) if (i < 32)
mask |= 1 << i; mask |= 1 << i;
...@@ -137,7 +138,7 @@ unsigned int probe_irq_mask(unsigned long val) ...@@ -137,7 +138,7 @@ unsigned int probe_irq_mask(unsigned long val)
mask |= 1 << i; mask |= 1 << i;
desc->status = status & ~IRQ_AUTODETECT; desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i); desc->irq_data.chip->irq_shutdown(&desc->irq_data);
} }
raw_spin_unlock_irq(&desc->lock); raw_spin_unlock_irq(&desc->lock);
} }
...@@ -181,7 +182,7 @@ int probe_irq_off(unsigned long val) ...@@ -181,7 +182,7 @@ int probe_irq_off(unsigned long val)
nr_of_irqs++; nr_of_irqs++;
} }
desc->status = status & ~IRQ_AUTODETECT; desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i); desc->irq_data.chip->irq_shutdown(&desc->irq_data);
} }
raw_spin_unlock_irq(&desc->lock); raw_spin_unlock_irq(&desc->lock);
} }
......
This diff is collapsed.
/*
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
* Copyright (C) 2005-2006, Thomas Gleixner, Russell King
*
* This file contains the dummy interrupt chip implementation
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include "internals.h"
/*
* What should we do if we get a hw irq event on an illegal vector?
* Each architecture has to answer this themself.
*/
static void ack_bad(struct irq_data *data)
{
struct irq_desc *desc = irq_data_to_desc(data);
print_irq_desc(data->irq, desc);
ack_bad_irq(data->irq);
}
/*
* NOP functions
*/
static void noop(struct irq_data *data) { }
static unsigned int noop_ret(struct irq_data *data)
{
return 0;
}
#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
static void compat_noop(unsigned int irq) { }
#define END_INIT .end = compat_noop
#else
#define END_INIT
#endif
/*
* Generic no controller implementation
*/
struct irq_chip no_irq_chip = {
.name = "none",
.irq_startup = noop_ret,
.irq_shutdown = noop,
.irq_enable = noop,
.irq_disable = noop,
.irq_ack = ack_bad,
END_INIT
};
/*
* Generic dummy implementation which can be used for
* real dumb interrupt sources
*/
struct irq_chip dummy_irq_chip = {
.name = "dummy",
.irq_startup = noop_ret,
.irq_shutdown = noop,
.irq_enable = noop,
.irq_disable = noop,
.irq_ack = noop,
.irq_mask = noop,
.irq_unmask = noop,
END_INIT
};
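dummy_irq_chip pairs naturally with set_irq_chip_and_handler() for sources that need no hardware acknowledgement at all, much as the twl4030 hunk above does (it copies dummy_irq_chip and renames it). A one-line sketch with a hypothetical irq number:

	set_irq_chip_and_handler(MY_VIRTUAL_IRQ, &dummy_irq_chip, handle_simple_irq);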
This diff is collapsed.