Commit 2d385336 authored by Linus Torvalds

Merge tag 'irq-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
 "Updates for the interrupt subsystem:

  Treewide:

    - Cleanup of setup_irq(), which is no longer required because the
      memory allocator is available early (a before/after sketch of the
      conversion follows this message).

      Most cleanup changes come through the various maintainer trees, so
      the final removal of setup_irq() is postponed towards the end of
      the merge window.

  Core:

    - Protection against unsafe invocation of interrupt handlers and
      unsafe interrupt injection, including a fixup of the offending
      PCI/AER error injection mechanism.

      Invoking interrupt handlers from arbitrary contexts, i.e. outside
      of an actual interrupt, can cause inconsistent state on the
      fragile x86 interrupt affinity changing hardware trainwreck (a
      sketch of the core-side guard follows the commit list below).

  Drivers:

    - Second wave of support for the new ARM GICv4.1

    - Multi-instance support for Xilinx and PLIC interrupt controllers

    - CPU-Hotplug support for PLIC

    - The obligatory new driver for X1000 TCU

    - Enhancements, cleanups and fixes all over the place"
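
As an illustration of the treewide conversion pattern (a minimal sketch
with a hypothetical driver; foo_interrupt and FOO_IRQ are placeholder
names, not code from this merge):

	/* Before: a static struct irqaction passed to setup_irq(),
	 * historically required before the memory allocator was up. */
	static struct irqaction foo_irqaction = {
		.handler	= foo_interrupt,
		.name		= "foo",
	};
	setup_irq(FOO_IRQ, &foo_irqaction);

	/* After: request_irq() allocates the irqaction internally, so
	 * the static struct goes away; unlike setup_irq(), the return
	 * value should be checked. */
	if (request_irq(FOO_IRQ, foo_interrupt, 0, "foo", NULL))
		pr_err("Failed to request irq %d (foo)\n", FOO_IRQ);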

* tag 'irq-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (58 commits)
  unicore32: Replace setup_irq() by request_irq()
  sh: Replace setup_irq() by request_irq()
  hexagon: Replace setup_irq() by request_irq()
  c6x: Replace setup_irq() by request_irq()
  alpha: Replace setup_irq() by request_irq()
  irqchip/gic-v4.1: Eagerly vmap vPEs
  irqchip/gic-v4.1: Add VSGI property setup
  irqchip/gic-v4.1: Add VSGI allocation/teardown
  irqchip/gic-v4.1: Move doorbell management to the GICv4 abstraction layer
  irqchip/gic-v4.1: Plumb set_vcpu_affinity SGI callbacks
  irqchip/gic-v4.1: Plumb get/set_irqchip_state SGI callbacks
  irqchip/gic-v4.1: Plumb mask/unmask SGI callbacks
  irqchip/gic-v4.1: Add initial SGI configuration
  irqchip/gic-v4.1: Plumb skeletal VSGI irqchip
  irqchip/stm32: Retrigger both in eoi and unmask callbacks
  irqchip/gic-v3: Move irq_domain_update_bus_token to after checking for NULL domain
  irqchip/xilinx: Do not call irq_set_default_host()
  irqchip/xilinx: Enable generic irq multi handler
  irqchip/xilinx: Fill error code when irq domain registration fails
  irqchip/xilinx: Add support for multiple instances
  ...
parents 673b41e0 8a13b02a
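
As an illustration of the interrupt-context enforcement mentioned in the
core section above (a paraphrased sketch of the core-side guard, not
necessarily the exact upstream code): x86 vector interrupts are flagged
with irqd_set_handle_enforce_irqctx() (see the x86/apic hunk below), and
generic_handle_irq() then refuses to run flagged handlers outside of
hard interrupt context:

	int generic_handle_irq(unsigned int irq)
	{
		struct irq_desc *desc = irq_to_desc(irq);

		if (!desc)
			return -EINVAL;

		/* Flagged interrupts may only be handled from real
		 * interrupt context, not e.g. from an error-injection
		 * ioctl path. */
		if (WARN_ON_ONCE(!in_irq() &&
				 irqd_is_handle_enforce_irqctx(&desc->irq_data)))
			return -EPERM;

		generic_handle_irq_desc(desc);
		return 0;
	}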
@@ -213,32 +213,13 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
  * The special RTC interrupt type.  The interrupt itself was
  * processed by PALcode, and comes in via entInt vector 1.
  */
-struct irqaction timer_irqaction = {
-	.handler	= rtc_timer_interrupt,
-	.name		= "timer",
-};
-
 void __init
-init_rtc_irq(void)
+init_rtc_irq(irq_handler_t handler)
 {
 	irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
 				      handle_percpu_irq, "RTC");
-	setup_irq(RTC_IRQ, &timer_irqaction);
+	if (!handler)
+		handler = rtc_timer_interrupt;
+	if (request_irq(RTC_IRQ, handler, 0, "timer", NULL))
+		pr_err("Failed to register timer interrupt\n");
 }
-
-/* Dummy irqactions.  */
-struct irqaction isa_cascade_irqaction = {
-	.handler	= no_action,
-	.name		= "isa-cascade"
-};
-
-struct irqaction timer_cascade_irqaction = {
-	.handler	= no_action,
-	.name		= "timer-cascade"
-};
-
-struct irqaction halt_switch_irqaction = {
-	.handler	= no_action,
-	.name		= "halt-switch"
-};
@@ -82,11 +82,6 @@ struct irq_chip i8259a_irq_type = {
 void __init
 init_i8259a_irqs(void)
 {
-	static struct irqaction cascade = {
-		.handler	= no_action,
-		.name		= "cascade",
-	};
-
 	long i;
 
 	outb(0xff, 0x21);	/* mask all of 8259A-1 */
@@ -96,7 +91,8 @@ init_i8259a_irqs(void)
 		irq_set_chip_and_handler(i, &i8259a_irq_type, handle_level_irq);
 	}
 
-	setup_irq(2, &cascade);
+	if (request_irq(2, no_action, 0, "cascade", NULL))
+		pr_err("Failed to request irq 2 (cascade)\n");
 }
...
@@ -21,14 +21,9 @@ extern void isa_no_iack_sc_device_interrupt(unsigned long);
 extern void srm_device_interrupt(unsigned long);
 extern void pyxis_device_interrupt(unsigned long);
 
-extern struct irqaction timer_irqaction;
-extern struct irqaction isa_cascade_irqaction;
-extern struct irqaction timer_cascade_irqaction;
-extern struct irqaction halt_switch_irqaction;
-
 extern void init_srm_irqs(long, unsigned long);
 extern void init_pyxis_irqs(unsigned long);
-extern void init_rtc_irq(void);
+extern void init_rtc_irq(irq_handler_t handler);
 extern void common_init_isa_dma(void);
...
@@ -107,5 +107,6 @@ init_pyxis_irqs(unsigned long ignore_mask)
 			irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
-	setup_irq(16+7, &isa_cascade_irqaction);
+	if (request_irq(16 + 7, no_action, 0, "isa-cascade", NULL))
+		pr_err("Failed to register isa-cascade interrupt\n");
 }
@@ -133,7 +133,8 @@ alcor_init_irq(void)
 	init_i8259a_irqs();
 	common_init_isa_dma();
 
-	setup_irq(16+31, &isa_cascade_irqaction);
+	if (request_irq(16 + 31, no_action, 0, "isa-cascade", NULL))
+		pr_err("Failed to register isa-cascade interrupt\n");
 }
...
@@ -112,7 +112,8 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
 	}
 
 	common_init_isa_dma();
-	setup_irq(16+4, &isa_cascade_irqaction);
+	if (request_irq(16 + 4, no_action, 0, "isa-cascade", NULL))
+		pr_err("Failed to register isa-cascade interrupt\n");
 }
 
 #ifndef CONFIG_ALPHA_PC164
...
@@ -123,7 +123,8 @@ eb64p_init_irq(void)
 	}
 
 	common_init_isa_dma();
-	setup_irq(16+5, &isa_cascade_irqaction);
+	if (request_irq(16 + 5, no_action, 0, "isa-cascade", NULL))
+		pr_err("Failed to register isa-cascade interrupt\n");
 }
 
 /*
...
@@ -397,7 +397,7 @@ marvel_init_pci(void)
 static void __init
 marvel_init_rtc(void)
 {
-	init_rtc_irq();
+	init_rtc_irq(NULL);
 }
 
 static void
...
@@ -81,8 +81,10 @@ miata_init_irq(void)
 	init_pyxis_irqs(0x63b0000);
 	common_init_isa_dma();
 
-	setup_irq(16+2, &halt_switch_irqaction);	/* SRM only? */
-	setup_irq(16+6, &timer_cascade_irqaction);
+	if (request_irq(16 + 2, no_action, 0, "halt-switch", NULL))	/* SRM only? */
+		pr_err("Failed to register halt-switch interrupt\n");
+	if (request_irq(16 + 6, no_action, 0, "timer-cascade", NULL))
+		pr_err("Failed to register timer-cascade interrupt\n");
 }
...
@@ -82,7 +82,8 @@ ruffian_init_rtc(void)
 	outb(0x31, 0x42);
 	outb(0x13, 0x42);
 
-	setup_irq(0, &timer_irqaction);
+	if (request_irq(0, rtc_timer_interrupt, 0, "timer", NULL))
+		pr_err("Failed to request irq 0 (timer)\n");
 }
 
 static void
...
@@ -106,7 +106,8 @@ rx164_init_irq(void)
 	init_i8259a_irqs();
 	common_init_isa_dma();
 
-	setup_irq(16+20, &isa_cascade_irqaction);
+	if (request_irq(16 + 20, no_action, 0, "isa-cascade", NULL))
+		pr_err("Failed to register isa-cascade interrupt\n");
 }
...
@@ -54,7 +54,8 @@ sx164_init_irq(void)
 	else
 		init_pyxis_irqs(0xff00003f0000UL);
 
-	setup_irq(16+6, &timer_cascade_irqaction);
+	if (request_irq(16 + 6, no_action, 0, "timer-cascade", NULL))
+		pr_err("Failed to register timer-cascade interrupt\n");
 }
 
 /*
...
@@ -156,10 +156,6 @@ static void __init
 wildfire_init_irq_per_pca(int qbbno, int pcano)
 {
 	int i, irq_bias;
-	static struct irqaction isa_enable = {
-		.handler	= no_action,
-		.name		= "isa_enable",
-	};
 
 	irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
 		 + pcano * WILDFIRE_IRQ_PER_PCA;
@@ -198,7 +194,8 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
 		irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
 	}
 
-	setup_irq(32+irq_bias, &isa_enable);
+	if (request_irq(32 + irq_bias, no_action, 0, "isa_enable", NULL))
+		pr_err("Failed to register isa_enable interrupt\n");
 }
 
 static void __init
...
@@ -242,7 +242,7 @@ common_init_rtc(void)
 	outb(0x31, 0x42);
 	outb(0x13, 0x42);
 
-	init_rtc_irq();
+	init_rtc_irq(NULL);
 }
 
@@ -396,9 +396,7 @@ time_init(void)
 	if (alpha_using_qemu) {
 		clocksource_register_hz(&qemu_cs, NSEC_PER_SEC);
 		init_qemu_clockevent();
-
-		timer_irqaction.handler = qemu_timer_interrupt;
-		init_rtc_irq();
+		init_rtc_irq(qemu_timer_interrupt);
 		return;
 	}
...
@@ -302,10 +302,13 @@ static int sa1111_retrigger_irq(struct irq_data *d)
 			break;
 	}
 
-	if (i == 8)
+	if (i == 8) {
 		pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n",
 		       d->irq);
-	return i == 8 ? -1 : 0;
+		return 0;
+	}
+
+	return 1;
 }
 
 static int sa1111_type_irq(struct irq_data *d, unsigned int flags)
...
@@ -165,13 +165,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static struct irqaction timer_iact = {
-	.name		= "timer",
-	.flags		= IRQF_TIMER,
-	.handler	= timer_interrupt,
-	.dev_id		= &t64_clockevent_device,
-};
-
 void __init timer64_init(void)
 {
 	struct clock_event_device *cd = &t64_clockevent_device;
@@ -238,7 +231,9 @@ void __init timer64_init(void)
 	cd->cpumask = cpumask_of(smp_processor_id());
 
 	clockevents_register_device(cd);
-	setup_irq(cd->irq, &timer_iact);
+	if (request_irq(cd->irq, timer_interrupt, IRQF_TIMER, "timer",
+			&t64_clockevent_device))
+		pr_err("Failed to request irq %d (timer)\n", cd->irq);
 
 out:
 	of_node_put(np);
...
@@ -114,12 +114,6 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	local_irq_restore(flags);
 }
 
-static struct irqaction ipi_intdesc = {
-	.handler = handle_ipi,
-	.flags = IRQF_TRIGGER_RISING,
-	.name = "ipi_handler"
-};
-
 void __init smp_prepare_boot_cpu(void)
 {
 }
@@ -132,8 +126,8 @@ void __init smp_prepare_boot_cpu(void)
 
 void start_secondary(void)
 {
-	unsigned int cpu;
 	unsigned long thread_ptr;
+	unsigned int cpu, irq;
 
 	/*  Calculate thread_info pointer from stack pointer  */
 	__asm__ __volatile__(
@@ -155,7 +149,10 @@ void start_secondary(void)
 
 	cpu = smp_processor_id();
 
-	setup_irq(BASE_IPI_IRQ + cpu, &ipi_intdesc);
+	irq = BASE_IPI_IRQ + cpu;
+	if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING, "ipi_handler",
+			NULL))
+		pr_err("Failed to request irq %u (ipi_handler)\n", irq);
 
 	/*  Register the clock_event dummy  */
 	setup_percpu_clockdev();
@@ -201,7 +198,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	int i;
+	int i, irq = BASE_IPI_IRQ;
 
 	/*
 	 * should eventually have some sort of machine
@@ -213,8 +210,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		set_cpu_present(i, true);
 
 	/*  Also need to register the interrupts for IPI  */
-	if (max_cpus > 1)
-		setup_irq(BASE_IPI_IRQ, &ipi_intdesc);
+	if (max_cpus > 1) {
+		if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING,
+				"ipi_handler", NULL))
+			pr_err("Failed to request irq %d (ipi_handler)\n", irq);
+	}
 }
 
 void smp_send_reschedule(int cpu)
...
@@ -143,13 +143,6 @@ static irqreturn_t timer_interrupt(int irq, void *devid)
 	return IRQ_HANDLED;
 }
 
-/*  This should also be pulled from devtree  */
-static struct irqaction rtos_timer_intdesc = {
-	.handler = timer_interrupt,
-	.flags = IRQF_TIMER | IRQF_TRIGGER_RISING,
-	.name = "rtos_timer"
-};
-
 /*
  * time_init_deferred - called by start_kernel to set up timer/clock source
  *
@@ -163,6 +156,7 @@ void __init time_init_deferred(void)
 {
 	struct resource *resource = NULL;
 	struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
+	unsigned long flag = IRQF_TIMER | IRQF_TRIGGER_RISING;
 
 	ce_dev->cpumask = cpu_all_mask;
 
@@ -195,7 +189,8 @@ void __init time_init_deferred(void)
 #endif
 
 	clockevents_register_device(ce_dev);
-	setup_irq(ce_dev->irq, &rtos_timer_intdesc);
+	if (request_irq(ce_dev->irq, timer_interrupt, flag, "rtos_timer", NULL))
+		pr_err("Failed to register rtos_timer interrupt\n");
 }
 
 void __init time_init(void)
...
@@ -47,6 +47,8 @@ config MICROBLAZE
 	select CPU_NO_EFFICIENT_FFS
 	select MMU_GATHER_NO_RANGE if MMU
 	select SPARSE_IRQ
+	select GENERIC_IRQ_MULTI_HANDLER
+	select HANDLE_DOMAIN_IRQ
 
 # Endianness selection
 choice
...
@@ -14,7 +14,4 @@
 struct pt_regs;
 extern void do_IRQ(struct pt_regs *regs);
 
-/* should be defined in each interrupt controller driver */
-extern unsigned int xintc_get_irq(void);
-
 #endif /* _ASM_MICROBLAZE_IRQ_H */
@@ -20,29 +20,10 @@
 #include <linux/irqchip.h>
 #include <linux/of_irq.h>
 
-static u32 concurrent_irq;
-
 void __irq_entry do_IRQ(struct pt_regs *regs)
 {
-	unsigned int irq;
-	struct pt_regs *old_regs = set_irq_regs(regs);
 	trace_hardirqs_off();
-
-	irq_enter();
-	irq = xintc_get_irq();
-next_irq:
-	BUG_ON(!irq);
-	generic_handle_irq(irq);
-
-	irq = xintc_get_irq();
-	if (irq != -1U) {
-		pr_debug("next irq: %d\n", irq);
-		++concurrent_irq;
-		goto next_irq;
-	}
-
-	irq_exit();
-	set_irq_regs(old_regs);
+	handle_arch_irq(regs);
 	trace_hardirqs_on();
 }
...
@@ -157,5 +157,5 @@ void __init trap_init(void)
 	/* Set the exception vector address */
 	csr_write(CSR_TVEC, &handle_exception);
 	/* Enable interrupts */
-	csr_write(CSR_IE, IE_SIE | IE_EIE);
+	csr_write(CSR_IE, IE_SIE);
 }
@@ -40,16 +40,6 @@ static irqreturn_t cayman_interrupt_pci2(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static struct irqaction cayman_action_smsc = {
-	.name		= "Cayman SMSC Mux",
-	.handler	= cayman_interrupt_smsc,
-};
-
-static struct irqaction cayman_action_pci2 = {
-	.name		= "Cayman PCI2 Mux",
-	.handler	= cayman_interrupt_pci2,
-};
-
 static void enable_cayman_irq(struct irq_data *data)
 {
 	unsigned int irq = data->irq;
@@ -149,6 +139,10 @@ void init_cayman_irq(void)
 	}
 
 	/* Setup the SMSC interrupt */
-	setup_irq(SMSC_IRQ, &cayman_action_smsc);
-	setup_irq(PCI2_IRQ, &cayman_action_pci2);
+	if (request_irq(SMSC_IRQ, cayman_interrupt_smsc, 0, "Cayman SMSC Mux",
+			NULL))
+		pr_err("Failed to register Cayman SMSC Mux interrupt\n");
+	if (request_irq(PCI2_IRQ, cayman_interrupt_pci2, 0, "Cayman PCI2 Mux",
+			NULL))
+		pr_err("Failed to register Cayman PCI2 Mux interrupt\n");
 }
@@ -64,11 +64,6 @@ static int pvr2_xfer_dma(struct dma_channel *chan)
 	return 0;
 }
 
-static struct irqaction pvr2_dma_irq = {
-	.name		= "pvr2 DMA handler",
-	.handler	= pvr2_dma_interrupt,
-};
-
 static struct dma_ops pvr2_dma_ops = {
 	.request	= pvr2_request_dma,
 	.get_residue	= pvr2_get_dma_residue,
@@ -84,7 +79,9 @@ static struct dma_info pvr2_dma_info = {
 
 static int __init pvr2_dma_init(void)
 {
-	setup_irq(HW_EVENT_PVR2_DMA, &pvr2_dma_irq);
+	if (request_irq(HW_EVENT_PVR2_DMA, pvr2_dma_interrupt, 0,
+			"pvr2 DMA handler", NULL))
+		pr_err("Failed to register pvr2 DMA handler interrupt\n");
 	request_dma(PVR2_CASCADE_CHAN, "pvr2 cascade");
 
 	return register_dmac(&pvr2_dma_info);
...
@@ -72,13 +72,6 @@ static struct clocksource cksrc_puv3_oscr = {
 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static struct irqaction puv3_timer_irq = {
-	.name		= "ost0",
-	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
-	.handler	= puv3_ost0_interrupt,
-	.dev_id		= &ckevt_puv3_osmr0,
-};
-
 void __init time_init(void)
 {
 	writel(0, OST_OIER);		/* disable any timer interrupts */
@@ -94,7 +87,9 @@ void __init time_init(void)
 	ckevt_puv3_osmr0.min_delta_ticks = MIN_OSCR_DELTA * 2;
 	ckevt_puv3_osmr0.cpumask = cpumask_of(0);
 
-	setup_irq(IRQ_TIMER0, &puv3_timer_irq);
+	if (request_irq(IRQ_TIMER0, puv3_ost0_interrupt,
+			IRQF_TIMER | IRQF_IRQPOLL, "ost0", &ckevt_puv3_osmr0))
+		pr_err("Failed to register ost0 interrupt\n");
 
 	clocksource_register_hz(&cksrc_puv3_oscr, CLOCK_TICK_RATE);
 	clockevents_register_device(&ckevt_puv3_osmr0);
...
@@ -128,6 +128,7 @@ config X86
 	select GENERIC_GETTIMEOFDAY
 	select GENERIC_VDSO_TIME_NS
 	select GUP_GET_PTE_LOW_HIGH		if X86_PAE
+	select HARDIRQS_SW_RESEND
 	select HARDLOCKUP_CHECK_TIMESTAMP	if X86_64
 	select HAVE_ACPI_APEI			if ACPI
 	select HAVE_ACPI_APEI_NMI		if ACPI
...
@@ -556,6 +556,12 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		irqd->chip_data = apicd;
 		irqd->hwirq = virq + i;
 		irqd_set_single_target(irqd);
+		/*
+		 * Prevent that any of these interrupts is invoked in
+		 * non interrupt context via e.g. generic_handle_irq()
+		 * as that can corrupt the affinity move state.
+		 */
+		irqd_set_handle_enforce_irqctx(irqd);
 
 		/*
 		 * Legacy vectors are already assigned when the IOAPIC
 		 * takes them over. They stay on the same vector. This is
...
@@ -458,7 +458,7 @@ config IMX_IRQSTEER
 	  Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
 
 config IMX_INTMUX
-	def_bool y if ARCH_MXC
+	def_bool y if ARCH_MXC || COMPILE_TEST
 	select IRQ_DOMAIN
 	help
 	  Support for the i.MX INTMUX interrupt multiplexer.
...
@@ -83,7 +83,7 @@ static int aic_retrigger(struct irq_data *d)
 	irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
 	irq_gc_unlock(gc);
 
-	return 0;
+	return 1;
 }
 
 static int aic_set_type(struct irq_data *d, unsigned type)
...
@@ -128,7 +128,7 @@ static int aic5_retrigger(struct irq_data *d)
 	irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
 	irq_gc_unlock(bgc);
 
-	return 0;
+	return 1;
 }
 
 static int aic5_set_type(struct irq_data *d, unsigned type)
...
@@ -61,6 +61,7 @@
 					 | SHORTCUT1_MASK | SHORTCUT2_MASK)
 
 #define REG_FIQ_CONTROL		0x0c
+#define FIQ_CONTROL_ENABLE	BIT(7)
 
 #define NR_BANKS		3
 #define IRQS_PER_BANK		32
@@ -135,6 +136,7 @@ static int __init armctrl_of_init(struct device_node *node,
 {
 	void __iomem *base;
 	int irq, b, i;
+	u32 reg;
 
 	base = of_iomap(node, 0);
 	if (!base)
@@ -157,6 +159,19 @@ static int __init armctrl_of_init(struct device_node *node,
 					     handle_level_irq);
 			irq_set_probe(irq);
 		}
+
+		reg = readl_relaxed(intc.enable[b]);
+		if (reg) {
+			writel_relaxed(reg, intc.disable[b]);
+			pr_err(FW_BUG "Bootloader left irq enabled: "
+			       "bank %d irq %*pbl\n", b, IRQS_PER_BANK, &reg);
+		}
+	}
+
+	reg = readl_relaxed(base + REG_FIQ_CONTROL);
+	if (reg & FIQ_CONTROL_ENABLE) {
+		writel_relaxed(0, base + REG_FIQ_CONTROL);
+		pr_err(FW_BUG "Bootloader left fiq enabled\n");
 	}
 
 	if (is_2836) {
...
@@ -50,7 +50,7 @@ struct bcm7038_l1_chip {
 
 struct bcm7038_l1_cpu {
 	void __iomem	*map_base;
-	u32		mask_cache[0];
+	u32		mask_cache[];
 };
 
 /*
...
... (one file's diff collapsed, not shown)
@@ -724,6 +724,7 @@ static void __init gic_dist_init(void)
 	unsigned int i;
 	u64 affinity;
 	void __iomem *base = gic_data.dist_base;
+	u32 val;
 
 	/* Disable the distributor */
 	writel_relaxed(0, base + GICD_CTLR);
@@ -756,9 +757,14 @@ static void __init gic_dist_init(void)
 	/* Now do the common stuff, and wait for the distributor to drain */
 	gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
 
+	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
+	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
+		pr_info("Enabling SGIs without active state\n");
+		val |= GICD_CTLR_nASSGIreq;
+	}
+
 	/* Enable distributor with ARE, Group1 */
-	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
-		       base + GICD_CTLR);
+	writel_relaxed(val, base + GICD_CTLR);
 
 	/*
 	 * Set all global interrupts to the boot CPU only. ARE must be
@@ -829,6 +835,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
 	typer = gic_read_typer(ptr + GICR_TYPER);
 	if ((typer >> 32) == aff) {
 		u64 offset = ptr - region->redist_base;
+		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
 		gic_data_rdist_rd_base() = ptr;
 		gic_data_rdist()->phys_base = region->phys_base + offset;
 
@@ -1609,7 +1616,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
 	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
 						 &gic_data);
-	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
 	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
 	gic_data.rdists.has_rvpeid = true;
 	gic_data.rdists.has_vlpis = true;
@@ -1620,6 +1626,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
 		goto out_free;
 	}
 
+	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
+
 	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
 	pr_info("Distributor has %sRange Selector support\n",
 		gic_data.has_rss ? "" : "no ");
@@ -1785,6 +1793,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
 		gic_v3_kvm_info.vcpu = r;
 
 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
 	gic_set_kvm_info(&gic_v3_kvm_info);
 }
 
@@ -2100,6 +2109,7 @@ static void __init gic_acpi_setup_kvm_info(void)
 	}
 
 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
 	gic_set_kvm_info(&gic_v3_kvm_info);
 }
 
...
@@ -85,6 +85,53 @@
 
 static struct irq_domain *gic_domain;
 static const struct irq_domain_ops *vpe_domain_ops;
+static const struct irq_domain_ops *sgi_domain_ops;
+
+static bool has_v4_1(void)
+{
+	return !!sgi_domain_ops;
+}
+
+static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
+{
+	char *name;
+	int sgi_base;
+
+	if (!has_v4_1())
+		return 0;
+
+	name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
+	if (!name)
+		goto err;
+
+	vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
+	if (!vpe->fwnode)
+		goto err;
+
+	kfree(name);
+	name = NULL;
+
+	vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
+						   sgi_domain_ops, vpe);
+	if (!vpe->sgi_domain)
+		goto err;
+
+	sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
+					   NUMA_NO_NODE, vpe,
+					   false, NULL);
+	if (sgi_base <= 0)
+		goto err;
+
+	return 0;
+
+err:
+	if (vpe->sgi_domain)
+		irq_domain_remove(vpe->sgi_domain);
+	if (vpe->fwnode)
+		irq_domain_free_fwnode(vpe->fwnode);
+	kfree(name);
+	return -ENOMEM;
+}
 
 int its_alloc_vcpu_irqs(struct its_vm *vm)
 {
@@ -112,8 +159,13 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
 	if (vpe_base_irq <= 0)
 		goto err;
 
-	for (i = 0; i < vm->nr_vpes; i++)
+	for (i = 0; i < vm->nr_vpes; i++) {
+		int ret;
+
 		vm->vpes[i]->irq = vpe_base_irq + i;
+		ret = its_alloc_vcpu_sgis(vm->vpes[i], i);
+		if (ret)
+			goto err;
+	}
 
 	return 0;
 
@@ -126,8 +178,28 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
 	return -ENOMEM;
 }
 
+static void its_free_sgi_irqs(struct its_vm *vm)
+{
+	int i;
+
+	if (!has_v4_1())
+		return;
+
+	for (i = 0; i < vm->nr_vpes; i++) {
+		unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0);
+
+		if (WARN_ON(!irq))
+			continue;
+
+		irq_domain_free_irqs(irq, 16);
+		irq_domain_remove(vm->vpes[i]->sgi_domain);
+		irq_domain_free_fwnode(vm->vpes[i]->fwnode);
+	}
+}
+
 void its_free_vcpu_irqs(struct its_vm *vm)
 {
+	its_free_sgi_irqs(vm);
 	irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
 	irq_domain_remove(vm->domain);
 	irq_domain_free_fwnode(vm->fwnode);
@@ -138,18 +210,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
 	return irq_set_vcpu_affinity(vpe->irq, info);
 }
 
-int its_schedule_vpe(struct its_vpe *vpe, bool on)
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
+{
+	struct irq_desc *desc = irq_to_desc(vpe->irq);
+	struct its_cmd_info info = { };
+	int ret;
+
+	WARN_ON(preemptible());
+
+	info.cmd_type = DESCHEDULE_VPE;
+	if (has_v4_1()) {
+		/* GICv4.1 can directly deal with doorbells */
+		info.req_db = db;
+	} else {
+		/* Undo the nested disable_irq() calls... */
+		while (db && irqd_irq_disabled(&desc->irq_data))
+			enable_irq(vpe->irq);
+	}
+
+	ret = its_send_vpe_cmd(vpe, &info);
+	if (!ret)
+		vpe->resident = false;
+
+	return ret;
+}
+
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
 {
-	struct its_cmd_info info;
+	struct its_cmd_info info = { };
 	int ret;
 
 	WARN_ON(preemptible());
 
-	info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
+	info.cmd_type = SCHEDULE_VPE;
+	if (has_v4_1()) {
+		info.g0en = g0en;
+		info.g1en = g1en;
+	} else {
+		/* Disabled the doorbell, as we're about to enter the guest */
+		disable_irq_nosync(vpe->irq);
+	}
 
 	ret = its_send_vpe_cmd(vpe, &info);
 	if (!ret)
-		vpe->resident = on;
+		vpe->resident = true;
 
 	return ret;
 }
@@ -216,12 +320,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
 	return irq_set_vcpu_affinity(irq, &info);
 }
 
-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
+int its_prop_update_vsgi(int irq, u8 priority, bool group)
+{
+	struct its_cmd_info info = {
+		.cmd_type = PROP_UPDATE_VSGI,
+		{
+			.priority	= priority,
+			.group		= group,
+		},
+	};
+
+	return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_init_v4(struct irq_domain *domain,
+		const struct irq_domain_ops *vpe_ops,
+		const struct irq_domain_ops *sgi_ops)
 {
 	if (domain) {
 		pr_info("ITS: Enabling GICv4 support\n");
 		gic_domain = domain;
-		vpe_domain_ops = ops;
+		vpe_domain_ops = vpe_ops;
+		sgi_domain_ops = sgi_ops;
 		return 0;
 	}
 
...
@@ -268,15 +268,6 @@ static void init_8259A(int auto_eoi)
 	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.name = "cascade",
-	.flags = IRQF_NO_THREAD,
-};
-
 static struct resource pic1_io_resource = {
 	.name = "pic1",
 	.start = PIC_MASTER_CMD,
@@ -311,6 +302,10 @@ static const struct irq_domain_ops i8259A_ops = {
  */
 struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
 {
+	/*
+	 * PIC_CASCADE_IR is cascade interrupt to second interrupt controller
+	 */
+	int irq = I8259A_IRQ_BASE + PIC_CASCADE_IR;
 	struct irq_domain *domain;
 
 	insert_resource(&ioport_resource, &pic1_io_resource);
@@ -323,7 +318,8 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
 	if (!domain)
 		panic("Failed to add i8259 IRQ domain");
 
-	setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+	if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
+		pr_err("Failed to register cascade interrupt\n");
 	register_syscore_ops(&i8259_syscore_ops);
 	return domain;
 }
...
@@ -180,3 +180,4 @@ static int __init ingenic_tcu_irq_init(struct device_node *np,
 IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init);
 IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init);
 IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init);
+IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init);
@@ -58,11 +58,6 @@ static irqreturn_t intc_cascade(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static struct irqaction intc_cascade_action = {
-	.handler = intc_cascade,
-	.name = "SoC intc cascade interrupt",
-};
-
 static int __init ingenic_intc_of_init(struct device_node *node,
 				       unsigned num_chips)
 {
@@ -130,7 +125,9 @@ static int __init ingenic_intc_of_init(struct device_node *node,
 		irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
 	}
 
-	setup_irq(parent_irq, &intc_cascade_action);
+	if (request_irq(parent_irq, intc_cascade, 0,
+			"SoC intc cascade interrupt", NULL))
+		pr_err("Failed to register SoC intc cascade interrupt\n");
 	return 0;
 
 out_domain_remove:
...
@@ -461,7 +461,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
 		}
 
 		i->iomem = devm_ioremap(dev, io[k]->start,
-					resource_size(io[k]));
+				       resource_size(io[k]));
 		if (!i->iomem) {
 			dev_err(dev, "failed to remap IOMEM\n");
 			ret = -ENXIO;
...
@@ -4,6 +4,7 @@
  * Copyright (C) 2018 Christoph Hellwig
  */
 #define pr_fmt(fmt) "plic: " fmt
+#include <linux/cpu.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/irq.h>
@@ -55,7 +56,14 @@
 #define     CONTEXT_THRESHOLD		0x00
 #define     CONTEXT_CLAIM		0x04
 
-static void __iomem *plic_regs;
+#define	PLIC_DISABLE_THRESHOLD		0xf
+#define	PLIC_ENABLE_THRESHOLD		0
+
+struct plic_priv {
+	struct cpumask lmask;
+	struct irq_domain *irqdomain;
+	void __iomem *regs;
+};
 
 struct plic_handler {
 	bool			present;
@@ -66,6 +74,7 @@ struct plic_handler {
 	 */
 	raw_spinlock_t		enable_lock;
 	void __iomem		*enable_base;
+	struct plic_priv	*priv;
 };
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
 
@@ -84,31 +93,40 @@ static inline void plic_toggle(struct plic_handler *handler,
 }
 
 static inline void plic_irq_toggle(const struct cpumask *mask,
-				   int hwirq, int enable)
+				   struct irq_data *d, int enable)
 {
 	int cpu;
+	struct plic_priv *priv = irq_get_chip_data(d->irq);
 
-	writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+	writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
 	for_each_cpu(cpu, mask) {
 		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
 
-		if (handler->present)
-			plic_toggle(handler, hwirq, enable);
+		if (handler->present &&
+		    cpumask_test_cpu(cpu, &handler->priv->lmask))
+			plic_toggle(handler, d->hwirq, enable);
 	}
 }
 
 static void plic_irq_unmask(struct irq_data *d)
 {
-	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
-					   cpu_online_mask);
+	struct cpumask amask;
+	unsigned int cpu;
+	struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+	cpumask_and(&amask, &priv->lmask, cpu_online_mask);
+	cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+			      &amask);
 	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
 		return;
-	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+	plic_irq_toggle(cpumask_of(cpu), d, 1);
 }
 
 static void plic_irq_mask(struct irq_data *d)
 {
-	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+	struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+	plic_irq_toggle(&priv->lmask, d, 0);
 }
 
 #ifdef CONFIG_SMP
@@ -116,17 +134,21 @@ static int plic_set_affinity(struct irq_data *d,
 			     const struct cpumask *mask_val, bool force)
 {
 	unsigned int cpu;
+	struct cpumask amask;
+	struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+	cpumask_and(&amask, &priv->lmask, mask_val);
 
 	if (force)
-		cpu = cpumask_first(mask_val);
+		cpu = cpumask_first(&amask);
 	else
-		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+		cpu = cpumask_any_and(&amask, cpu_online_mask);
 
 	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
-	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+	plic_irq_toggle(&priv->lmask, d, 0);
+	plic_irq_toggle(cpumask_of(cpu), d, 1);
 
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -187,8 +209,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
 	.free		= irq_domain_free_irqs_top,
 };
 
-static struct irq_domain *plic_irqdomain;
-
 /*
  * Handling an interrupt is a two-step process: first you claim the interrupt
  * by reading the claim register, then you complete the interrupt by writing
@@ -205,7 +225,7 @@ static void plic_handle_irq(struct pt_regs *regs)
 
 	csr_clear(CSR_IE, IE_EIE);
 	while ((hwirq = readl(claim))) {
-		int irq = irq_find_mapping(plic_irqdomain, hwirq);
+		int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
 
 		if (unlikely(irq <= 0))
 			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
@@ -230,20 +250,48 @@ static int plic_find_hart_id(struct device_node *node)
 	return -1;
 }
 
+static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
+{
+	/* priority must be > threshold to trigger an interrupt */
+	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
+}
+
+static int plic_dying_cpu(unsigned int cpu)
+{
+	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+	csr_clear(CSR_IE, IE_EIE);
+	plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
+
+	return 0;
+}
+
+static int plic_starting_cpu(unsigned int cpu)
+{
+	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+	csr_set(CSR_IE, IE_EIE);
+	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
+
+	return 0;
+}
+
 static int __init plic_init(struct device_node *node,
 		struct device_node *parent)
 {
 	int error = 0, nr_contexts, nr_handlers = 0, i;
 	u32 nr_irqs;
+	struct plic_priv *priv;
 
-	if (plic_regs) {
-		pr_warn("PLIC already present.\n");
-		return -ENXIO;
-	}
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
 
-	plic_regs = of_iomap(node, 0);
-	if (WARN_ON(!plic_regs))
-		return -EIO;
+	priv->regs = of_iomap(node, 0);
+	if (WARN_ON(!priv->regs)) {
+		error = -EIO;
+		goto out_free_priv;
+	}
 
 	error = -EINVAL;
 	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
@@ -257,9 +305,9 @@ static int __init plic_init(struct device_node *node,
 		goto out_iounmap;
 
 	error = -ENOMEM;
-	plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
-			&plic_irqdomain_ops, NULL);
-	if (WARN_ON(!plic_irqdomain))
+	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
+			&plic_irqdomain_ops, priv);
+	if (WARN_ON(!priv->irqdomain))
 		goto out_iounmap;
 
 	for (i = 0; i < nr_contexts; i++) {
@@ -267,7 +315,6 @@ static int __init plic_init(struct device_node *node,
 		struct plic_handler *handler;
 		irq_hw_number_t hwirq;
 		int cpu, hartid;
-		u32 threshold = 0;
 
 		if (of_irq_parse_one(node, i, &parent)) {
 			pr_err("failed to parse parent for context %d.\n", i);
@@ -301,32 +348,36 @@ static int __init plic_init(struct device_node *node,
 		handler = per_cpu_ptr(&plic_handlers, cpu);
 		if (handler->present) {
 			pr_warn("handler already present for context %d.\n", i);
-			threshold = 0xffffffff;
+			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
 			goto done;
 		}
 
+		cpumask_set_cpu(cpu, &priv->lmask);
 		handler->present = true;
 		handler->hart_base =
-			plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+			priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
 		raw_spin_lock_init(&handler->enable_lock);
 		handler->enable_base =
-			plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
+			priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
+		handler->priv = priv;
 done:
-		/* priority must be > threshold to trigger an interrupt */
-		writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
 		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
 			plic_toggle(handler, hwirq, 0);
 		nr_handlers++;
 	}
 
+	cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+			  "irqchip/sifive/plic:starting",
+			  plic_starting_cpu, plic_dying_cpu);
 	pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
 		nr_irqs, nr_handlers, nr_contexts);
 	set_handle_irq(plic_handle_irq);
 	return 0;
 
 out_iounmap:
-	iounmap(plic_regs);
+	iounmap(priv->regs);
+out_free_priv:
+	kfree(priv);
 	return error;
 }
...
@@ -604,12 +604,24 @@ static void stm32_exti_h_syscore_deinit(void)
 	unregister_syscore_ops(&stm32_exti_h_syscore_ops);
 }
 
+static int stm32_exti_h_retrigger(struct irq_data *d)
+{
+	struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+	const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+	void __iomem *base = chip_data->host_data->base;
+	u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+	writel_relaxed(mask, base + stm32_bank->swier_ofst);
+
+	return 0;
+}
+
 static struct irq_chip stm32_exti_h_chip = {
 	.name			= "stm32-exti-h",
 	.irq_eoi		= stm32_exti_h_eoi,
 	.irq_mask		= stm32_exti_h_mask,
 	.irq_unmask		= stm32_exti_h_unmask,
-	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_retrigger		= stm32_exti_h_retrigger,
 	.irq_set_type		= stm32_exti_h_set_type,
 	.irq_set_wake		= stm32_exti_h_set_wake,
 	.flags			= IRQCHIP_MASK_ON_SUSPEND,
...
@@ -6,6 +6,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/irqchip/versatile-fpga.h>
 #include <linux/irqdomain.h>
 #include <linux/module.h>
@@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d)
 
 static void fpga_irq_handle(struct irq_desc *desc)
 {
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
-	u32 status = readl(f->base + IRQ_STATUS);
+	u32 status;
+
+	chained_irq_enter(chip, desc);
 
+	status = readl(f->base + IRQ_STATUS);
 	if (status == 0) {
 		do_bad_IRQ(desc);
-		return;
+		goto out;
 	}
 
 	do {
@@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc)
 		status &= ~(1 << irq);
 		generic_handle_irq(irq_find_mapping(f->domain, irq));
 	} while (status);
+
+out:
+	chained_irq_exit(chip, desc);
 }
 
 /*
@@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node,
 	if (of_property_read_u32(node, "valid-mask", &valid_mask))
 		valid_mask = 0;
 
+	writel(clear_mask, base + IRQ_ENABLE_CLEAR);
+	writel(clear_mask, base + FIQ_ENABLE_CLEAR);
+
 	/* Some chips are cascaded from a parent IRQ */
 	parent_irq = irq_of_parse_and_map(node, 0);
 	if (!parent_irq) {
@@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node,
 
 	fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
 
-	writel(clear_mask, base + IRQ_ENABLE_CLEAR);
-	writel(clear_mask, base + FIQ_ENABLE_CLEAR);
-
 	/*
 	 * On Versatile AB/PB, some secondary interrupts have a direct
 	 * pass-thru to the primary controller for IRQs 20 and 22-31 which need
...
@@ -509,9 +509,7 @@ static int __init vic_of_init(struct device_node *node,
 	void __iomem *regs;
 	u32 interrupt_mask = ~0;
 	u32 wakeup_mask = ~0;
-
-	if (WARN(parent, "non-root VICs are not supported"))
-		return -EINVAL;
+	int parent_irq;
 
 	regs = of_iomap(node, 0);
 	if (WARN_ON(!regs))
@@ -519,11 +517,14 @@ static int __init vic_of_init(struct device_node *node,
 	of_property_read_u32(node, "valid-mask", &interrupt_mask);
 	of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
 
+	parent_irq = of_irq_get(node, 0);
+	if (parent_irq < 0)
+		parent_irq = 0;
+
 	/*
 	 * Passing 0 as first IRQ makes the simple domain allocate descriptors
 	 */
-	__vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node);
+	__vic_init(regs, parent_irq, 0, interrupt_mask, wakeup_mask, node);
 
 	return 0;
 }
...
...@@ -38,29 +38,31 @@ struct xintc_irq_chip { ...@@ -38,29 +38,31 @@ struct xintc_irq_chip {
void __iomem *base; void __iomem *base;
struct irq_domain *root_domain; struct irq_domain *root_domain;
u32 intr_mask; u32 intr_mask;
u32 nr_irq;
}; };
static struct xintc_irq_chip *xintc_irqc; static struct xintc_irq_chip *primary_intc;
static void xintc_write(int reg, u32 data) static void xintc_write(struct xintc_irq_chip *irqc, int reg, u32 data)
{ {
if (static_branch_unlikely(&xintc_is_be)) if (static_branch_unlikely(&xintc_is_be))
iowrite32be(data, xintc_irqc->base + reg); iowrite32be(data, irqc->base + reg);
else else
iowrite32(data, xintc_irqc->base + reg); iowrite32(data, irqc->base + reg);
} }
static unsigned int xintc_read(int reg) static u32 xintc_read(struct xintc_irq_chip *irqc, int reg)
{ {
if (static_branch_unlikely(&xintc_is_be)) if (static_branch_unlikely(&xintc_is_be))
return ioread32be(xintc_irqc->base + reg); return ioread32be(irqc->base + reg);
else else
return ioread32(xintc_irqc->base + reg); return ioread32(irqc->base + reg);
} }
static void intc_enable_or_unmask(struct irq_data *d) static void intc_enable_or_unmask(struct irq_data *d)
{ {
unsigned long mask = 1 << d->hwirq; struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
unsigned long mask = BIT(d->hwirq);
pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq); pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
...@@ -69,30 +71,35 @@ static void intc_enable_or_unmask(struct irq_data *d) ...@@ -69,30 +71,35 @@ static void intc_enable_or_unmask(struct irq_data *d)
* acks the irq before calling the interrupt handler * acks the irq before calling the interrupt handler
*/ */
if (irqd_is_level_type(d)) if (irqd_is_level_type(d))
xintc_write(IAR, mask); xintc_write(irqc, IAR, mask);
xintc_write(SIE, mask); xintc_write(irqc, SIE, mask);
} }
static void intc_disable_or_mask(struct irq_data *d) static void intc_disable_or_mask(struct irq_data *d)
{ {
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
pr_debug("irq-xilinx: disable: %ld\n", d->hwirq); pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
xintc_write(CIE, 1 << d->hwirq); xintc_write(irqc, CIE, BIT(d->hwirq));
} }
static void intc_ack(struct irq_data *d) static void intc_ack(struct irq_data *d)
{ {
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
pr_debug("irq-xilinx: ack: %ld\n", d->hwirq); pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
xintc_write(IAR, 1 << d->hwirq); xintc_write(irqc, IAR, BIT(d->hwirq));
} }
static void intc_mask_ack(struct irq_data *d) static void intc_mask_ack(struct irq_data *d)
{ {
unsigned long mask = 1 << d->hwirq; struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
unsigned long mask = BIT(d->hwirq);
pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq); pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
xintc_write(CIE, mask); xintc_write(irqc, CIE, mask);
xintc_write(IAR, mask); xintc_write(irqc, IAR, mask);
} }
static struct irq_chip intc_dev = { static struct irq_chip intc_dev = {
...@@ -103,13 +110,14 @@ static struct irq_chip intc_dev = { ...@@ -103,13 +110,14 @@ static struct irq_chip intc_dev = {
.irq_mask_ack = intc_mask_ack, .irq_mask_ack = intc_mask_ack,
}; };
unsigned int xintc_get_irq(void) static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc)
{ {
unsigned int hwirq, irq = -1; unsigned int irq = 0;
u32 hwirq;
hwirq = xintc_read(IVR); hwirq = xintc_read(irqc, IVR);
if (hwirq != -1U) if (hwirq != -1U)
irq = irq_find_mapping(xintc_irqc->root_domain, hwirq); irq = irq_find_mapping(irqc->root_domain, hwirq);
pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq); pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
...@@ -118,15 +126,18 @@ unsigned int xintc_get_irq(void) ...@@ -118,15 +126,18 @@ unsigned int xintc_get_irq(void)
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{ {
if (xintc_irqc->intr_mask & (1 << hw)) { struct xintc_irq_chip *irqc = d->host_data;
if (irqc->intr_mask & BIT(hw)) {
irq_set_chip_and_handler_name(irq, &intc_dev, irq_set_chip_and_handler_name(irq, &intc_dev,
handle_edge_irq, "edge"); handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL); irq_clear_status_flags(irq, IRQ_LEVEL);
} else { } else {
irq_set_chip_and_handler_name(irq, &intc_dev, irq_set_chip_and_handler_name(irq, &intc_dev,
handle_level_irq, "level"); handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL); irq_set_status_flags(irq, IRQ_LEVEL);
} }
irq_set_chip_data(irq, irqc);
return 0; return 0;
} }
...@@ -138,43 +149,55 @@ static const struct irq_domain_ops xintc_irq_domain_ops = { ...@@ -138,43 +149,55 @@ static const struct irq_domain_ops xintc_irq_domain_ops = {
static void xil_intc_irq_handler(struct irq_desc *desc) static void xil_intc_irq_handler(struct irq_desc *desc)
{ {
struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_chip *chip = irq_desc_get_chip(desc);
struct xintc_irq_chip *irqc;
u32 pending; u32 pending;
irqc = irq_data_get_irq_handler_data(&desc->irq_data);
chained_irq_enter(chip, desc); chained_irq_enter(chip, desc);
do { do {
pending = xintc_get_irq(); pending = xintc_get_irq_local(irqc);
if (pending == -1U) if (pending == 0)
break; break;
generic_handle_irq(pending); generic_handle_irq(pending);
} while (true); } while (true);
chained_irq_exit(chip, desc); chained_irq_exit(chip, desc);
} }
static void xil_intc_handle_irq(struct pt_regs *regs)
{
u32 hwirq;
struct xintc_irq_chip *irqc = primary_intc;
do {
hwirq = xintc_read(irqc, IVR);
if (likely(hwirq != -1U)) {
int ret;
ret = handle_domain_irq(irqc->root_domain, hwirq, regs);
WARN_ONCE(ret, "Unhandled HWIRQ %d\n", hwirq);
continue;
}
break;
} while (1);
}
static int __init xilinx_intc_of_init(struct device_node *intc, static int __init xilinx_intc_of_init(struct device_node *intc,
struct device_node *parent) struct device_node *parent)
{ {
u32 nr_irq;
int ret, irq;
struct xintc_irq_chip *irqc; struct xintc_irq_chip *irqc;
int ret, irq;
if (xintc_irqc) {
pr_err("irq-xilinx: Multiple instances aren't supported\n");
return -EINVAL;
}
irqc = kzalloc(sizeof(*irqc), GFP_KERNEL); irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
if (!irqc) if (!irqc)
return -ENOMEM; return -ENOMEM;
xintc_irqc = irqc;
irqc->base = of_iomap(intc, 0); irqc->base = of_iomap(intc, 0);
BUG_ON(!irqc->base); BUG_ON(!irqc->base);
ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq); ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq);
if (ret < 0) { if (ret < 0) {
pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n"); pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
goto err_alloc; goto error;
} }
ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask); ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
...@@ -183,34 +206,35 @@ static int __init xilinx_intc_of_init(struct device_node *intc, ...@@ -183,34 +206,35 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
irqc->intr_mask = 0; irqc->intr_mask = 0;
} }
if (irqc->intr_mask >> nr_irq) if (irqc->intr_mask >> irqc->nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n", pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
intc, nr_irq, irqc->intr_mask); intc, irqc->nr_irq, irqc->intr_mask);
/* /*
* Disable all external interrupts until they are * Disable all external interrupts until they are
* explicitly requested. * explicitly requested.
*/ */
xintc_write(IER, 0); xintc_write(irqc, IER, 0);
/* Acknowledge any pending interrupts just in case. */ /* Acknowledge any pending interrupts just in case. */
xintc_write(IAR, 0xffffffff); xintc_write(irqc, IAR, 0xffffffff);
/* Turn on the Master Enable. */ /* Turn on the Master Enable. */
xintc_write(MER, MER_HIE | MER_ME); xintc_write(irqc, MER, MER_HIE | MER_ME);
if (!(xintc_read(MER) & (MER_HIE | MER_ME))) { if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) {
static_branch_enable(&xintc_is_be); static_branch_enable(&xintc_is_be);
xintc_write(MER, MER_HIE | MER_ME); xintc_write(irqc, MER, MER_HIE | MER_ME);
} }
irqc->root_domain = irq_domain_add_linear(intc, nr_irq, irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
&xintc_irq_domain_ops, irqc); &xintc_irq_domain_ops, irqc);
if (!irqc->root_domain) { if (!irqc->root_domain) {
pr_err("irq-xilinx: Unable to create IRQ domain\n"); pr_err("irq-xilinx: Unable to create IRQ domain\n");
goto err_alloc; ret = -EINVAL;
goto error;
} }
if (parent) { if (parent) {
...@@ -222,16 +246,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc, ...@@ -222,16 +246,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
} else { } else {
pr_err("irq-xilinx: interrupts property not in DT\n"); pr_err("irq-xilinx: interrupts property not in DT\n");
ret = -EINVAL; ret = -EINVAL;
goto err_alloc; goto error;
} }
} else { } else {
irq_set_default_host(irqc->root_domain); primary_intc = irqc;
set_handle_irq(xil_intc_handle_irq);
} }
return 0; return 0;
err_alloc: error:
xintc_irqc = NULL; iounmap(irqc->base);
kfree(irqc); kfree(irqc);
return ret; return ret;
......
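The conversion above replaces the driver's single global xintc_irqc pointer with true per-instance state: the instance is handed to the domain as host_data, attached to each Linux interrupt with irq_set_chip_data() in the .map callback, and recovered in every irq_chip callback via irq_data_get_irq_chip_data(). A minimal sketch of that pattern, using hypothetical foo_* names and a made-up register offset, not the driver's actual code:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    struct foo_intc {
            void __iomem *base;             /* per-instance MMIO window */
            struct irq_domain *domain;
    };

    static void foo_mask(struct irq_data *d)
    {
            /* Recover the instance attached in foo_map() below */
            struct foo_intc *intc = irq_data_get_irq_chip_data(d);

            writel(BIT(d->hwirq), intc->base + 0x14);   /* 0x14: made-up CIE offset */
    }

    static int foo_map(struct irq_domain *domain, unsigned int virq,
                       irq_hw_number_t hwirq)
    {
            /* domain->host_data was supplied to irq_domain_add_linear() */
            irq_set_chip_data(virq, domain->host_data);
            return 0;
    }

Because the instance travels with the irq_data, a second controller simply gets a second foo_intc and a second domain; no bail-out like the removed "Multiple instances aren't supported" check is needed.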
...@@ -33,7 +33,7 @@ struct combiner { ...@@ -33,7 +33,7 @@ struct combiner {
int parent_irq; int parent_irq;
u32 nirqs; u32 nirqs;
u32 nregs; u32 nregs;
struct combiner_reg regs[0]; struct combiner_reg regs[];
}; };
static inline int irq_nr(u32 reg, u32 bit) static inline int irq_nr(u32 reg, u32 bit)
......
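The regs[0] to regs[] change replaces the old GNU zero-length-array extension with a standard C99 flexible array member, which lets compilers and static checkers reason about the real array bound. Allocation then sizes the trailing array explicitly; a sketch, assuming nregs is known at allocation time:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    /* struct_size() computes sizeof(*c) + nregs * sizeof(c->regs[0])
     * with overflow checking, instead of open-coded arithmetic. */
    struct combiner *c = kzalloc(struct_size(c, regs, nregs), GFP_KERNEL);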
...@@ -34,6 +34,7 @@ config PCIEAER ...@@ -34,6 +34,7 @@ config PCIEAER
config PCIEAER_INJECT config PCIEAER_INJECT
tristate "PCI Express error injection support" tristate "PCI Express error injection support"
depends on PCIEAER depends on PCIEAER
select GENERIC_IRQ_INJECTION
help help
This enables PCI Express Root Port Advanced Error Reporting This enables PCI Express Root Port Advanced Error Reporting
(AER) software error injector. (AER) software error injector.
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/irq.h> #include <linux/interrupt.h>
#include <linux/miscdevice.h> #include <linux/miscdevice.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -468,9 +468,7 @@ static int aer_inject(struct aer_error_inj *einj) ...@@ -468,9 +468,7 @@ static int aer_inject(struct aer_error_inj *einj)
} }
pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n", pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n",
einj->cor_status, einj->uncor_status, pci_name(dev)); einj->cor_status, einj->uncor_status, pci_name(dev));
local_irq_disable(); ret = irq_inject_interrupt(edev->irq);
generic_handle_irq(edev->irq);
local_irq_enable();
} else { } else {
pci_err(rpdev, "AER device not found\n"); pci_err(rpdev, "AER device not found\n");
ret = -ENODEV; ret = -ENODEV;
......
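The replaced sequence ran the handler synchronously from process context with interrupts disabled, which is exactly the unsafe invocation the IRQD_HANDLE_ENFORCE_IRQCTX enforcement further down rejects. irq_inject_interrupt() instead goes through the irqchip pending state or the software resend machinery, and it can legitimately fail; a hedged sketch of how a caller might treat the result (illustrative, not AER's exact handling):

    int err = irq_inject_interrupt(edev->irq);

    /* -EINVAL for level/NMI or non-activated lines, -EBUSY while a
     * previous resend is still replaying */
    if (err)
            pci_warn(edev->port, "error injection failed: %d\n", err);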
...@@ -92,6 +92,7 @@ struct stm32_gpio_bank { ...@@ -92,6 +92,7 @@ struct stm32_gpio_bank {
u32 bank_nr; u32 bank_nr;
u32 bank_ioport_nr; u32 bank_ioport_nr;
u32 pin_backup[STM32_GPIO_PINS_PER_BANK]; u32 pin_backup[STM32_GPIO_PINS_PER_BANK];
u8 irq_type[STM32_GPIO_PINS_PER_BANK];
}; };
struct stm32_pinctrl { struct stm32_pinctrl {
...@@ -303,6 +304,50 @@ static const struct gpio_chip stm32_gpio_template = { ...@@ -303,6 +304,50 @@ static const struct gpio_chip stm32_gpio_template = {
.get_direction = stm32_gpio_get_direction, .get_direction = stm32_gpio_get_direction,
}; };
static void stm32_gpio_irq_trigger(struct irq_data *d)
{
struct stm32_gpio_bank *bank = d->domain->host_data;
int level;
/* If level interrupt type then retrig */
level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
(level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
irq_chip_retrigger_hierarchy(d);
}
static void stm32_gpio_irq_eoi(struct irq_data *d)
{
irq_chip_eoi_parent(d);
stm32_gpio_irq_trigger(d);
};
static int stm32_gpio_set_type(struct irq_data *d, unsigned int type)
{
struct stm32_gpio_bank *bank = d->domain->host_data;
u32 parent_type;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
parent_type = type;
break;
case IRQ_TYPE_LEVEL_HIGH:
parent_type = IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_LEVEL_LOW:
parent_type = IRQ_TYPE_EDGE_FALLING;
break;
default:
return -EINVAL;
}
bank->irq_type[d->hwirq] = type;
return irq_chip_set_type_parent(d, parent_type);
};
static int stm32_gpio_irq_request_resources(struct irq_data *irq_data) static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
{ {
struct stm32_gpio_bank *bank = irq_data->domain->host_data; struct stm32_gpio_bank *bank = irq_data->domain->host_data;
...@@ -330,13 +375,19 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data) ...@@ -330,13 +375,19 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq); gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
} }
static void stm32_gpio_irq_unmask(struct irq_data *d)
{
irq_chip_unmask_parent(d);
stm32_gpio_irq_trigger(d);
}
static struct irq_chip stm32_gpio_irq_chip = { static struct irq_chip stm32_gpio_irq_chip = {
.name = "stm32gpio", .name = "stm32gpio",
.irq_eoi = irq_chip_eoi_parent, .irq_eoi = stm32_gpio_irq_eoi,
.irq_ack = irq_chip_ack_parent, .irq_ack = irq_chip_ack_parent,
.irq_mask = irq_chip_mask_parent, .irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent, .irq_unmask = stm32_gpio_irq_unmask,
.irq_set_type = irq_chip_set_type_parent, .irq_set_type = stm32_gpio_set_type,
.irq_set_wake = irq_chip_set_wake_parent, .irq_set_wake = irq_chip_set_wake_parent,
.irq_request_resources = stm32_gpio_irq_request_resources, .irq_request_resources = stm32_gpio_irq_request_resources,
.irq_release_resources = stm32_gpio_irq_release_resources, .irq_release_resources = stm32_gpio_irq_release_resources,
......
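The EXTI parent behind these GPIO banks only signals edges, so the driver emulates level semantics: stm32_gpio_set_type() records the requested level type and programs the matching edge into the parent, and both irq_eoi and irq_unmask re-sample the line, retriggering the hierarchy while it is still at the active level. The unmask hook needs the same recheck because a level that re-asserted while the interrupt was masked produces no fresh edge. A condensed sketch of the recheck, with line_is_active() as a hypothetical helper:

    /* After EOI or unmask: if the emulated level is still asserted,
     * kick the edge-only parent so no event is lost. */
    static void recheck_level(struct irq_data *d, unsigned int type)
    {
            if (line_is_active(d, type))    /* hypothetical GPIO sampler */
                    irq_chip_retrigger_hierarchy(d);
    }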
...@@ -70,6 +70,7 @@ struct vgic_global { ...@@ -70,6 +70,7 @@ struct vgic_global {
/* Hardware has GICv4? */ /* Hardware has GICv4? */
bool has_gicv4; bool has_gicv4;
bool has_gicv4_1;
/* GIC system register CPU interface */ /* GIC system register CPU interface */
struct static_key_false gicv3_cpuif; struct static_key_false gicv3_cpuif;
......
...@@ -102,6 +102,7 @@ enum cpuhp_state { ...@@ -102,6 +102,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_MICROCODE_LOADER, CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
......
...@@ -248,6 +248,8 @@ extern void enable_percpu_nmi(unsigned int irq, unsigned int type); ...@@ -248,6 +248,8 @@ extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq); extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq); extern void teardown_percpu_nmi(unsigned int irq);
extern int irq_inject_interrupt(unsigned int irq);
/* The following three functions are for the core kernel use only. */ /* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void); extern void suspend_device_irqs(void);
extern void resume_device_irqs(void); extern void resume_device_irqs(void);
......
...@@ -211,6 +211,8 @@ struct irq_data { ...@@ -211,6 +211,8 @@ struct irq_data {
* IRQD_CAN_RESERVE - Can use reservation mode * IRQD_CAN_RESERVE - Can use reservation mode
* IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
* required * required
* IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
* from actual interrupt context.
*/ */
enum { enum {
IRQD_TRIGGER_MASK = 0xf, IRQD_TRIGGER_MASK = 0xf,
...@@ -234,6 +236,7 @@ enum { ...@@ -234,6 +236,7 @@ enum {
IRQD_DEFAULT_TRIGGER_SET = (1 << 25), IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
IRQD_CAN_RESERVE = (1 << 26), IRQD_CAN_RESERVE = (1 << 26),
IRQD_MSI_NOMASK_QUIRK = (1 << 27), IRQD_MSI_NOMASK_QUIRK = (1 << 27),
IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28),
}; };
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
...@@ -303,6 +306,16 @@ static inline bool irqd_is_single_target(struct irq_data *d) ...@@ -303,6 +306,16 @@ static inline bool irqd_is_single_target(struct irq_data *d)
return __irqd_to_state(d) & IRQD_SINGLE_TARGET; return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
} }
static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d)
{
__irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX;
}
static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX;
}
static inline bool irqd_is_wakeup_set(struct irq_data *d) static inline bool irqd_is_wakeup_set(struct irq_data *d)
{ {
return __irqd_to_state(d) & IRQD_WAKEUP_STATE; return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
......
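IRQD_HANDLE_ENFORCE_IRQCTX lets an architecture mark interrupts whose handlers must only ever run from real interrupt context; generic_handle_irq() (changed further down) then refuses out-of-context invocation with -EPERM, and the software resend path declines to replay such lines. A hedged sketch of opting in from a domain's activate callback, with illustrative naming; x86's vector domain is the motivating user in this series:

    static int my_domain_activate(struct irq_domain *domain,
                                  struct irq_data *irqd, bool reserve)
    {
            /* Handlers for this line are unsafe outside hard IRQ context */
            irqd_set_handle_enforce_irqctx(irqd);
            return 0;
    }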
...@@ -32,6 +32,8 @@ struct gic_kvm_info { ...@@ -32,6 +32,8 @@ struct gic_kvm_info {
struct resource vctrl; struct resource vctrl;
/* vlpi support */ /* vlpi support */
bool has_v4; bool has_v4;
/* rvpeid support */
bool has_v4_1;
}; };
const struct gic_kvm_info *gic_get_kvm_info(void); const struct gic_kvm_info *gic_get_kvm_info(void);
......
...@@ -57,6 +57,7 @@ ...@@ -57,6 +57,7 @@
#define GICD_SPENDSGIR 0x0F20 #define GICD_SPENDSGIR 0x0F20
#define GICD_CTLR_RWP (1U << 31) #define GICD_CTLR_RWP (1U << 31)
#define GICD_CTLR_nASSGIreq (1U << 8)
#define GICD_CTLR_DS (1U << 6) #define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4) #define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1) #define GICD_CTLR_ENABLE_G1A (1U << 1)
...@@ -90,6 +91,7 @@ ...@@ -90,6 +91,7 @@
#define GICD_TYPER_ESPIS(typer) \ #define GICD_TYPER_ESPIS(typer) \
(((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0) (((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
#define GICD_TYPER2_nASSGIcap (1U << 8)
#define GICD_TYPER2_VIL (1U << 7) #define GICD_TYPER2_VIL (1U << 7)
#define GICD_TYPER2_VID GENMASK(4, 0) #define GICD_TYPER2_VID GENMASK(4, 0)
...@@ -320,6 +322,9 @@ ...@@ -320,6 +322,9 @@
#define GICR_VPENDBASER_NonShareable \ #define GICR_VPENDBASER_NonShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
#define GICR_VPENDBASER_InnerShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable)
#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) #define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) #define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) #define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
...@@ -343,6 +348,15 @@ ...@@ -343,6 +348,15 @@
#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58) #define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58)
#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0) #define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0)
#define GICR_VSGIR 0x0080
#define GICR_VSGIR_VPEID GENMASK(15, 0)
#define GICR_VSGIPENDR 0x0088
#define GICR_VSGIPENDR_BUSY (1U << 31)
#define GICR_VSGIPENDR_PENDING GENMASK(15, 0)
/* /*
* ITS registers, offsets from ITS_base * ITS registers, offsets from ITS_base
*/ */
...@@ -366,6 +380,11 @@ ...@@ -366,6 +380,11 @@
#define GITS_TRANSLATER 0x10040 #define GITS_TRANSLATER 0x10040
#define GITS_SGIR 0x20020
#define GITS_SGIR_VPEID GENMASK_ULL(47, 32)
#define GITS_SGIR_VINTID GENMASK_ULL(3, 0)
#define GITS_CTLR_ENABLE (1U << 0) #define GITS_CTLR_ENABLE (1U << 0)
#define GITS_CTLR_ImDe (1U << 1) #define GITS_CTLR_ImDe (1U << 1)
#define GITS_CTLR_ITS_NUMBER_SHIFT 4 #define GITS_CTLR_ITS_NUMBER_SHIFT 4
...@@ -500,8 +519,9 @@ ...@@ -500,8 +519,9 @@
#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) #define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) #define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) #define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
/* VMOVP and INVDB are the odd ones, as they don't have a physical counterpart */ /* VMOVP, VSGI and INVDB are the odd ones, as they don't have a physical counterpart */
#define GITS_CMD_VMOVP GITS_CMD_GICv4(2) #define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
#define GITS_CMD_VSGI GITS_CMD_GICv4(3)
#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe) #define GITS_CMD_INVDB GITS_CMD_GICv4(0xe)
/* /*
...@@ -650,6 +670,7 @@ ...@@ -650,6 +670,7 @@
struct rdists { struct rdists {
struct { struct {
raw_spinlock_t rd_lock;
void __iomem *rd_base; void __iomem *rd_base;
struct page *pend_page; struct page *pend_page;
phys_addr_t phys_base; phys_addr_t phys_base;
......
...@@ -49,10 +49,22 @@ struct its_vpe { ...@@ -49,10 +49,22 @@ struct its_vpe {
}; };
/* GICv4.1 implementations */ /* GICv4.1 implementations */
struct { struct {
struct fwnode_handle *fwnode;
struct irq_domain *sgi_domain;
struct {
u8 priority;
bool enabled;
bool group;
} sgi_config[16];
atomic_t vmapp_count; atomic_t vmapp_count;
}; };
}; };
/*
* Ensures mutual exclusion between affinity setting of the
* vPE and vLPI operations using vpe->col_idx.
*/
raw_spinlock_t vpe_lock;
/* /*
* This collection ID is used to indirect the target * This collection ID is used to indirect the target
* redistributor for this VPE. The ID itself isn't involved in * redistributor for this VPE. The ID itself isn't involved in
...@@ -93,6 +105,7 @@ enum its_vcpu_info_cmd_type { ...@@ -93,6 +105,7 @@ enum its_vcpu_info_cmd_type {
SCHEDULE_VPE, SCHEDULE_VPE,
DESCHEDULE_VPE, DESCHEDULE_VPE,
INVALL_VPE, INVALL_VPE,
PROP_UPDATE_VSGI,
}; };
struct its_cmd_info { struct its_cmd_info {
...@@ -105,19 +118,27 @@ struct its_cmd_info { ...@@ -105,19 +118,27 @@ struct its_cmd_info {
bool g0en; bool g0en;
bool g1en; bool g1en;
}; };
struct {
u8 priority;
bool group;
};
}; };
}; };
int its_alloc_vcpu_irqs(struct its_vm *vm); int its_alloc_vcpu_irqs(struct its_vm *vm);
void its_free_vcpu_irqs(struct its_vm *vm); void its_free_vcpu_irqs(struct its_vm *vm);
int its_schedule_vpe(struct its_vpe *vpe, bool on); int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
int its_invall_vpe(struct its_vpe *vpe); int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map); int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map); int its_get_vlpi(int irq, struct its_vlpi_map *map);
int its_unmap_vlpi(int irq); int its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv); int its_prop_update_vlpi(int irq, u8 config, bool inv);
int its_prop_update_vsgi(int irq, u8 priority, bool group);
struct irq_domain_ops; struct irq_domain_ops;
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops); int its_init_v4(struct irq_domain *domain,
const struct irq_domain_ops *vpe_ops,
const struct irq_domain_ops *sgi_ops);
#endif #endif
...@@ -43,6 +43,10 @@ config GENERIC_IRQ_MIGRATION ...@@ -43,6 +43,10 @@ config GENERIC_IRQ_MIGRATION
config AUTO_IRQ_AFFINITY config AUTO_IRQ_AFFINITY
bool bool
# Interrupt injection mechanism
config GENERIC_IRQ_INJECTION
bool
# Tasklet based software resend for pending interrupts on enable_irq() # Tasklet based software resend for pending interrupts on enable_irq()
config HARDIRQS_SW_RESEND config HARDIRQS_SW_RESEND
bool bool
...@@ -127,6 +131,7 @@ config SPARSE_IRQ ...@@ -127,6 +131,7 @@ config SPARSE_IRQ
config GENERIC_IRQ_DEBUGFS config GENERIC_IRQ_DEBUGFS
bool "Expose irq internals in debugfs" bool "Expose irq internals in debugfs"
depends on DEBUG_FS depends on DEBUG_FS
select GENERIC_IRQ_INJECTION
default n default n
---help--- ---help---
......
...@@ -278,7 +278,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force) ...@@ -278,7 +278,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
} }
} }
if (resend) if (resend)
check_irq_resend(desc); check_irq_resend(desc, false);
return ret; return ret;
} }
......
...@@ -190,33 +190,7 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf, ...@@ -190,33 +190,7 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
return -EFAULT; return -EFAULT;
if (!strncmp(buf, "trigger", size)) { if (!strncmp(buf, "trigger", size)) {
unsigned long flags; int err = irq_inject_interrupt(irq_desc_get_irq(desc));
int err;
/* Try the HW interface first */
err = irq_set_irqchip_state(irq_desc_get_irq(desc),
IRQCHIP_STATE_PENDING, true);
if (!err)
return count;
/*
* Otherwise, try to inject via the resend interface,
* which may or may not succeed.
*/
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
if (irq_settings_is_level(desc) || desc->istate & IRQS_NMI) {
/* Can't do level nor NMIs, sorry */
err = -EINVAL;
} else {
desc->istate |= IRQS_PENDING;
check_irq_resend(desc);
err = 0;
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(desc);
return err ? err : count; return err ? err : count;
} }
......
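This collapses the debugfs trigger path onto the same helper the AER injector now uses. The user-visible interface is unchanged: with CONFIG_GENERIC_IRQ_DEBUGFS enabled, writing the string "trigger" to the per-interrupt file (typically /sys/kernel/debug/irq/irqs/<nr>) injects the interrupt, and the write now propagates irq_inject_interrupt()'s error code on failure.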
...@@ -108,7 +108,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc); ...@@ -108,7 +108,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc); irqreturn_t handle_irq_event(struct irq_desc *desc);
/* Resending of interrupts: */ /* Resending of interrupts: */
void check_irq_resend(struct irq_desc *desc); int check_irq_resend(struct irq_desc *desc, bool inject);
bool irq_wait_for_poll(struct irq_desc *desc); bool irq_wait_for_poll(struct irq_desc *desc);
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
...@@ -425,6 +425,10 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) ...@@ -425,6 +425,10 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{ {
return desc->pending_mask; return desc->pending_mask;
} }
static inline bool handle_enforce_irqctx(struct irq_data *data)
{
return irqd_is_handle_enforce_irqctx(data);
}
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear); bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
#else /* CONFIG_GENERIC_PENDING_IRQ */ #else /* CONFIG_GENERIC_PENDING_IRQ */
static inline bool irq_can_move_pcntxt(struct irq_data *data) static inline bool irq_can_move_pcntxt(struct irq_data *data)
...@@ -451,6 +455,10 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear) ...@@ -451,6 +455,10 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
{ {
return false; return false;
} }
static inline bool handle_enforce_irqctx(struct irq_data *data)
{
return false;
}
#endif /* !CONFIG_GENERIC_PENDING_IRQ */ #endif /* !CONFIG_GENERIC_PENDING_IRQ */
#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY) #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
......
...@@ -638,9 +638,15 @@ void irq_init_desc(unsigned int irq) ...@@ -638,9 +638,15 @@ void irq_init_desc(unsigned int irq)
int generic_handle_irq(unsigned int irq) int generic_handle_irq(unsigned int irq)
{ {
struct irq_desc *desc = irq_to_desc(irq); struct irq_desc *desc = irq_to_desc(irq);
struct irq_data *data;
if (!desc) if (!desc)
return -EINVAL; return -EINVAL;
data = irq_desc_get_irq_data(desc);
if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data)))
return -EPERM;
generic_handle_irq_desc(desc); generic_handle_irq_desc(desc);
return 0; return 0;
} }
......
...@@ -46,11 +46,11 @@ const struct fwnode_operations irqchip_fwnode_ops; ...@@ -46,11 +46,11 @@ const struct fwnode_operations irqchip_fwnode_ops;
EXPORT_SYMBOL_GPL(irqchip_fwnode_ops); EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
/** /**
* irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for * __irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
* identifying an irq domain * identifying an irq domain
* @type: Type of irqchip_fwnode. See linux/irqdomain.h * @type: Type of irqchip_fwnode. See linux/irqdomain.h
* @name: Optional user provided domain name
* @id: Optional user provided id if name != NULL * @id: Optional user provided id if name != NULL
* @name: Optional user provided domain name
* @pa: Optional user-provided physical address * @pa: Optional user-provided physical address
* *
* Allocate a struct irqchip_fwid, and return a pointer to the embedded * Allocate a struct irqchip_fwid, and return a pointer to the embedded
...@@ -1310,6 +1310,11 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, ...@@ -1310,6 +1310,11 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
unsigned int irq_base, unsigned int irq_base,
unsigned int nr_irqs, void *arg) unsigned int nr_irqs, void *arg)
{ {
if (!domain->ops->alloc) {
pr_debug("domain->ops->alloc() is NULL\n");
return -ENOSYS;
}
return domain->ops->alloc(domain, irq_base, nr_irqs, arg); return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
} }
...@@ -1347,11 +1352,6 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, ...@@ -1347,11 +1352,6 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
return -EINVAL; return -EINVAL;
} }
if (!domain->ops->alloc) {
pr_debug("domain->ops->alloc() is NULL\n");
return -ENOSYS;
}
if (realloc && irq_base >= 0) { if (realloc && irq_base >= 0) {
virq = irq_base; virq = irq_base;
} else { } else {
......
...@@ -47,6 +47,43 @@ static void resend_irqs(unsigned long arg) ...@@ -47,6 +47,43 @@ static void resend_irqs(unsigned long arg)
/* Tasklet to handle resend: */ /* Tasklet to handle resend: */
static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0); static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
static int irq_sw_resend(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
/*
* Validate whether this interrupt can be safely injected from
* non-interrupt context
*/
if (handle_enforce_irqctx(&desc->irq_data))
return -EINVAL;
/*
* If the interrupt is running in the thread context of the parent
* irq we need to be careful, because we cannot trigger it
* directly.
*/
if (irq_settings_is_nested_thread(desc)) {
/*
* If the parent_irq is valid, we retrigger the parent,
* otherwise we do nothing.
*/
if (!desc->parent_irq)
return -EINVAL;
irq = desc->parent_irq;
}
/* Set it pending and activate the softirq: */
set_bit(irq, irqs_resend);
tasklet_schedule(&resend_tasklet);
return 0;
}
#else
static int irq_sw_resend(struct irq_desc *desc)
{
return -EINVAL;
}
#endif #endif
/* /*
...@@ -54,49 +91,83 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0); ...@@ -54,49 +91,83 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
* *
* Is called with interrupts disabled and desc->lock held. * Is called with interrupts disabled and desc->lock held.
*/ */
void check_irq_resend(struct irq_desc *desc) int check_irq_resend(struct irq_desc *desc, bool inject)
{ {
int err = 0;
/* /*
* We do not resend level type interrupts. Level type * We do not resend level type interrupts. Level type interrupts
* interrupts are resent by hardware when they are still * are resent by hardware when they are still active. Clear the
* active. Clear the pending bit so suspend/resume does not * pending bit so suspend/resume does not get confused.
* get confused.
*/ */
if (irq_settings_is_level(desc)) { if (irq_settings_is_level(desc)) {
desc->istate &= ~IRQS_PENDING; desc->istate &= ~IRQS_PENDING;
return; return -EINVAL;
} }
if (desc->istate & IRQS_REPLAY) if (desc->istate & IRQS_REPLAY)
return; return -EBUSY;
if (desc->istate & IRQS_PENDING) {
desc->istate &= ~IRQS_PENDING; if (!(desc->istate & IRQS_PENDING) && !inject)
return 0;
desc->istate &= ~IRQS_PENDING;
if (!desc->irq_data.chip->irq_retrigger ||
!desc->irq_data.chip->irq_retrigger(&desc->irq_data))
err = irq_sw_resend(desc);
/* If the retrigger was successful, mark it with the REPLAY bit */
if (!err)
desc->istate |= IRQS_REPLAY; desc->istate |= IRQS_REPLAY;
return err;
}
if (!desc->irq_data.chip->irq_retrigger || #ifdef CONFIG_GENERIC_IRQ_INJECTION
!desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { /**
#ifdef CONFIG_HARDIRQS_SW_RESEND * irq_inject_interrupt - Inject an interrupt for testing/error injection
unsigned int irq = irq_desc_get_irq(desc); * @irq: The interrupt number
*
/* * This function must only be used for debug and testing purposes!
* If the interrupt is running in the thread *
* context of the parent irq we need to be * Especially on x86 this can cause a premature completion of an interrupt
* careful, because we cannot trigger it * affinity change causing the interrupt line to become stale. Very
* directly. * unlikely, but possible.
*/ *
if (irq_settings_is_nested_thread(desc)) { * The injection can fail for various reasons:
/* * - Interrupt is not activated
* If the parent_irq is valid, we * - Interrupt is NMI type or currently replaying
* retrigger the parent, otherwise we * - Interrupt is level type
* do nothing. * - Interrupt does not support hardware retrigger and software resend is
*/ * either not enabled or not possible for the interrupt.
if (!desc->parent_irq) */
return; int irq_inject_interrupt(unsigned int irq)
irq = desc->parent_irq; {
} struct irq_desc *desc;
/* Set it pending and activate the softirq: */ unsigned long flags;
set_bit(irq, irqs_resend); int err;
tasklet_schedule(&resend_tasklet);
#endif /* Try the state injection hardware interface first */
} if (!irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true))
} return 0;
/* That failed, try via the resend mechanism */
desc = irq_get_desc_buslock(irq, &flags, 0);
if (!desc)
return -EINVAL;
/*
* Only try to inject when the interrupt is:
* - not NMI type
* - activated
*/
if ((desc->istate & IRQS_NMI) || !irqd_is_activated(&desc->irq_data))
err = -EINVAL;
else
err = check_irq_resend(desc, true);
irq_put_desc_busunlock(desc, flags);
return err;
} }
EXPORT_SYMBOL_GPL(irq_inject_interrupt);
#endif
...@@ -595,7 +595,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info) ...@@ -595,7 +595,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
/* GICv4 support? */ /* GICv4 support? */
if (info->has_v4) { if (info->has_v4) {
kvm_vgic_global_state.has_gicv4 = gicv4_enable; kvm_vgic_global_state.has_gicv4 = gicv4_enable;
kvm_info("GICv4 support %sabled\n", kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
kvm_info("GICv4%s support %sabled\n",
kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
gicv4_enable ? "en" : "dis"); gicv4_enable ? "en" : "dis");
} }
......
...@@ -67,10 +67,10 @@ ...@@ -67,10 +67,10 @@
* it. And if we've migrated our vcpu from one CPU to another, we must * it. And if we've migrated our vcpu from one CPU to another, we must
* tell the ITS (so that the messages reach the right redistributor). * tell the ITS (so that the messages reach the right redistributor).
* This is done in two steps: first issue a irq_set_affinity() on the * This is done in two steps: first issue a irq_set_affinity() on the
* irq corresponding to the vcpu, then call its_schedule_vpe(). You * irq corresponding to the vcpu, then call its_make_vpe_resident().
* must be in a non-preemptible context. On exit, another call to * You must be in a non-preemptible context. On exit, a call to
* its_schedule_vpe() tells the redistributor that we're done with the * its_make_vpe_non_resident() tells the redistributor that we're done
* vcpu. * with the vcpu.
* *
* Finally, the doorbell handling: Each vcpu is allocated an interrupt * Finally, the doorbell handling: Each vcpu is allocated an interrupt
* which will fire each time a VLPI is made pending whilst the vcpu is * which will fire each time a VLPI is made pending whilst the vcpu is
...@@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info) ...@@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
struct kvm_vcpu *vcpu = info; struct kvm_vcpu *vcpu = info;
/* We got the message, no need to fire again */ /* We got the message, no need to fire again */
if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data)) if (!kvm_vgic_global_state.has_gicv4_1 &&
!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
disable_irq_nosync(irq); disable_irq_nosync(irq);
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true; vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
...@@ -199,19 +200,11 @@ void vgic_v4_teardown(struct kvm *kvm) ...@@ -199,19 +200,11 @@ void vgic_v4_teardown(struct kvm *kvm)
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db) int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{ {
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
struct irq_desc *desc = irq_to_desc(vpe->irq);
if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident) if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
return 0; return 0;
/* return its_make_vpe_non_resident(vpe, need_db);
* If blocking, a doorbell is required. Undo the nested
* disable_irq() calls...
*/
while (need_db && irqd_irq_disabled(&desc->irq_data))
enable_irq(vpe->irq);
return its_schedule_vpe(vpe, false);
} }
int vgic_v4_load(struct kvm_vcpu *vcpu) int vgic_v4_load(struct kvm_vcpu *vcpu)
...@@ -232,18 +225,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu) ...@@ -232,18 +225,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
if (err) if (err)
return err; return err;
/* Disabled the doorbell, as we're about to enter the guest */ err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
disable_irq_nosync(vpe->irq);
err = its_schedule_vpe(vpe, true);
if (err) if (err)
return err; return err;
/* /*
* Now that the VPE is resident, let's get rid of a potential * Now that the VPE is resident, let's get rid of a potential
* doorbell interrupt that would still be pending. * doorbell interrupt that would still be pending. This is a
* GICv4.0 only "feature"...
*/ */
return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false); if (!kvm_vgic_global_state.has_gicv4_1)
err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
return err;
} }
static struct vgic_its *vgic_get_its(struct kvm *kvm, static struct vgic_its *vgic_get_its(struct kvm *kvm,
......
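With a GICv4.1 host the doorbell dance disappears: residency is requested explicitly and a doorbell is armed only on demand, so the old nested disable_irq()/enable_irq() bookkeeping on vpe->irq can go. A condensed sketch of the entry/exit flow the header comment above describes, with error handling elided and this_cpu, need_db and vgic_enabled as stand-in variables:

    /* vcpu entry: point the vPE's doorbell irq at this CPU, then
     * make the vPE resident; GICv4.1 also takes the group enables. */
    err = irq_set_affinity(vpe->irq, cpumask_of(this_cpu));
    if (!err)
            err = its_make_vpe_resident(vpe, false /* g0en */,
                                        vgic_enabled /* g1en */);

    /* vcpu exit: ask for a doorbell only if the vcpu is blocking
     * and must be woken when a VLPI becomes pending. */
    err = its_make_vpe_non_resident(vpe, need_db);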