Commit 2d385336 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'irq-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
 "Updates for the interrupt subsystem:

  Treewide:

    - Cleanup of setup_irq(), which is no longer required because the
      memory allocator is available early.

      Most cleanup changes come through the various maintainer trees, so
      the final removal of setup_irq() is postponed towards the end of
      the merge window.

  Core:

    - Protection against unsafe invocation of interrupt handlers and
      unsafe interrupt injection including a fixup of the offending
      PCI/AER error injection mechanism.

      Invoking interrupt handlers from arbitrary contexts, i.e. outside
      of an actual interrupt, can cause inconsistent state on the
      fragile x86 interrupt affinity changing hardware trainwreck.

  Drivers:

    - Second wave of support for the new ARM GICv4.1

    - Multi-instance support for Xilinx and PLIC interrupt controllers

    - CPU-Hotplug support for PLIC

    - The obligatory new driver for X1000 TCU

    - Enhancements, cleanups and fixes all over the place"

* tag 'irq-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (58 commits)
  unicore32: Replace setup_irq() by request_irq()
  sh: Replace setup_irq() by request_irq()
  hexagon: Replace setup_irq() by request_irq()
  c6x: Replace setup_irq() by request_irq()
  alpha: Replace setup_irq() by request_irq()
  irqchip/gic-v4.1: Eagerly vmap vPEs
  irqchip/gic-v4.1: Add VSGI property setup
  irqchip/gic-v4.1: Add VSGI allocation/teardown
  irqchip/gic-v4.1: Move doorbell management to the GICv4 abstraction layer
  irqchip/gic-v4.1: Plumb set_vcpu_affinity SGI callbacks
  irqchip/gic-v4.1: Plumb get/set_irqchip_state SGI callbacks
  irqchip/gic-v4.1: Plumb mask/unmask SGI callbacks
  irqchip/gic-v4.1: Add initial SGI configuration
  irqchip/gic-v4.1: Plumb skeletal VSGI irqchip
  irqchip/stm32: Retrigger both in eoi and unmask callbacks
  irqchip/gic-v3: Move irq_domain_update_bus_token to after checking for NULL domain
  irqchip/xilinx: Do not call irq_set_default_host()
  irqchip/xilinx: Enable generic irq multi handler
  irqchip/xilinx: Fill error code when irq domain registration fails
  irqchip/xilinx: Add support for multiple instances
  ...
parents 673b41e0 8a13b02a
......@@ -213,32 +213,13 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
* The special RTC interrupt type. The interrupt itself was
* processed by PALcode, and comes in via entInt vector 1.
*/
struct irqaction timer_irqaction = {
.handler = rtc_timer_interrupt,
.name = "timer",
};
void __init
init_rtc_irq(void)
init_rtc_irq(irq_handler_t handler)
{
irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
handle_percpu_irq, "RTC");
setup_irq(RTC_IRQ, &timer_irqaction);
if (!handler)
handler = rtc_timer_interrupt;
if (request_irq(RTC_IRQ, handler, 0, "timer", NULL))
pr_err("Failed to register timer interrupt\n");
}
/* Dummy irqactions. */
struct irqaction isa_cascade_irqaction = {
.handler = no_action,
.name = "isa-cascade"
};
struct irqaction timer_cascade_irqaction = {
.handler = no_action,
.name = "timer-cascade"
};
struct irqaction halt_switch_irqaction = {
.handler = no_action,
.name = "halt-switch"
};
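
The conversion above is the mechanical pattern repeated across this whole series: a statically allocated struct irqaction registered with setup_irq() becomes a plain request_irq() call, which is possible now that the memory allocator is up before these init paths run. Failures are only logged, since the __init callers have no sensible way to unwind. A condensed before/after sketch of the pattern:

/* Before: static irqaction + setup_irq(), the historical way to
 * register an interrupt before the allocator was available. */
static struct irqaction timer_irqaction = {
	.handler = rtc_timer_interrupt,
	.name    = "timer",
};
setup_irq(RTC_IRQ, &timer_irqaction);

/* After: request_irq() allocates the irqaction internally; the
 * return value is checked and merely reported on failure. */
if (request_irq(RTC_IRQ, rtc_timer_interrupt, 0, "timer", NULL))
	pr_err("Failed to register timer interrupt\n");
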
......@@ -82,11 +82,6 @@ struct irq_chip i8259a_irq_type = {
void __init
init_i8259a_irqs(void)
{
static struct irqaction cascade = {
.handler = no_action,
.name = "cascade",
};
long i;
outb(0xff, 0x21); /* mask all of 8259A-1 */
......@@ -96,7 +91,8 @@ init_i8259a_irqs(void)
irq_set_chip_and_handler(i, &i8259a_irq_type, handle_level_irq);
}
setup_irq(2, &cascade);
if (request_irq(2, no_action, 0, "cascade", NULL))
pr_err("Failed to request irq 2 (cascade)\n");
}
......
......@@ -21,14 +21,9 @@ extern void isa_no_iack_sc_device_interrupt(unsigned long);
extern void srm_device_interrupt(unsigned long);
extern void pyxis_device_interrupt(unsigned long);
extern struct irqaction timer_irqaction;
extern struct irqaction isa_cascade_irqaction;
extern struct irqaction timer_cascade_irqaction;
extern struct irqaction halt_switch_irqaction;
extern void init_srm_irqs(long, unsigned long);
extern void init_pyxis_irqs(unsigned long);
extern void init_rtc_irq(void);
extern void init_rtc_irq(irq_handler_t handler);
extern void common_init_isa_dma(void);
......
......@@ -107,5 +107,6 @@ init_pyxis_irqs(unsigned long ignore_mask)
irq_set_status_flags(i, IRQ_LEVEL);
}
setup_irq(16+7, &isa_cascade_irqaction);
if (request_irq(16 + 7, no_action, 0, "isa-cascade", NULL))
pr_err("Failed to register isa-cascade interrupt\n");
}
......@@ -133,7 +133,8 @@ alcor_init_irq(void)
init_i8259a_irqs();
common_init_isa_dma();
setup_irq(16+31, &isa_cascade_irqaction);
if (request_irq(16 + 31, no_action, 0, "isa-cascade", NULL))
pr_err("Failed to register isa-cascade interrupt\n");
}
......
......@@ -112,7 +112,8 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
}
common_init_isa_dma();
setup_irq(16+4, &isa_cascade_irqaction);
if (request_irq(16 + 4, no_action, 0, "isa-cascade", NULL))
pr_err("Failed to register isa-cascade interrupt\n");
}
#ifndef CONFIG_ALPHA_PC164
......
......@@ -123,7 +123,8 @@ eb64p_init_irq(void)
}
common_init_isa_dma();
setup_irq(16+5, &isa_cascade_irqaction);
if (request_irq(16 + 5, no_action, 0, "isa-cascade", NULL))
pr_err("Failed to register isa-cascade interrupt\n");
}
/*
......
......@@ -397,7 +397,7 @@ marvel_init_pci(void)
static void __init
marvel_init_rtc(void)
{
init_rtc_irq();
init_rtc_irq(NULL);
}
static void
......
......@@ -81,8 +81,10 @@ miata_init_irq(void)
init_pyxis_irqs(0x63b0000);
common_init_isa_dma();
setup_irq(16+2, &halt_switch_irqaction); /* SRM only? */
setup_irq(16+6, &timer_cascade_irqaction);
if (request_irq(16 + 2, no_action, 0, "halt-switch", NULL))
pr_err("Failed to register halt-switch interrupt\n");
if (request_irq(16 + 6, no_action, 0, "timer-cascade", NULL))
pr_err("Failed to register timer-cascade interrupt\n");
}
......
......@@ -82,7 +82,8 @@ ruffian_init_rtc(void)
outb(0x31, 0x42);
outb(0x13, 0x42);
setup_irq(0, &timer_irqaction);
if (request_irq(0, rtc_timer_interrupt, 0, "timer", NULL))
pr_err("Failed to request irq 0 (timer)\n");
}
static void
......
......@@ -106,7 +106,8 @@ rx164_init_irq(void)
init_i8259a_irqs();
common_init_isa_dma();
setup_irq(16+20, &isa_cascade_irqaction);
if (request_irq(16 + 20, no_action, 0, "isa-cascade", NULL))
pr_err("Failed to register isa-cascade interrupt\n");
}
......
......@@ -54,7 +54,8 @@ sx164_init_irq(void)
else
init_pyxis_irqs(0xff00003f0000UL);
setup_irq(16+6, &timer_cascade_irqaction);
if (request_irq(16 + 6, no_action, 0, "timer-cascade", NULL))
pr_err("Failed to register timer-cascade interrupt\n");
}
/*
......
......@@ -156,10 +156,6 @@ static void __init
wildfire_init_irq_per_pca(int qbbno, int pcano)
{
int i, irq_bias;
static struct irqaction isa_enable = {
.handler = no_action,
.name = "isa_enable",
};
irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
+ pcano * WILDFIRE_IRQ_PER_PCA;
......@@ -198,7 +194,8 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
}
setup_irq(32+irq_bias, &isa_enable);
if (request_irq(32 + irq_bias, no_action, 0, "isa_enable", NULL))
pr_err("Failed to register isa_enable interrupt\n");
}
static void __init
......
......@@ -242,7 +242,7 @@ common_init_rtc(void)
outb(0x31, 0x42);
outb(0x13, 0x42);
init_rtc_irq();
init_rtc_irq(NULL);
}
......@@ -396,9 +396,7 @@ time_init(void)
if (alpha_using_qemu) {
clocksource_register_hz(&qemu_cs, NSEC_PER_SEC);
init_qemu_clockevent();
timer_irqaction.handler = qemu_timer_interrupt;
init_rtc_irq();
init_rtc_irq(qemu_timer_interrupt);
return;
}
......
......@@ -302,10 +302,13 @@ static int sa1111_retrigger_irq(struct irq_data *d)
break;
}
if (i == 8)
if (i == 8) {
pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n",
d->irq);
return i == 8 ? -1 : 0;
return 0;
}
return 1;
}
static int sa1111_type_irq(struct irq_data *d, unsigned int flags)
......
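
The return-value change here (and the identical 0 -> 1 flips in the atmel-aic, atmel-aic5 and stm32-exti hunks below) tracks the reworked check_irq_resend() at the bottom of this series: an irq_chip's ->irq_retrigger() callback now returns nonzero when the line was successfully re-raised in hardware, and 0 to ask the core for the software resend fallback. A minimal sketch of the contract, with a hypothetical set-pending register:

static int my_chip_retrigger(struct irq_data *d)
{
	/* Re-raise the interrupt in hardware by setting its
	 * pending bit; MY_ISCR is a made-up register name. */
	writel(BIT(d->hwirq), my_base + MY_ISCR);

	return 1;	/* done in hardware, no irq_sw_resend() needed */
}
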
......@@ -165,13 +165,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
static struct irqaction timer_iact = {
.name = "timer",
.flags = IRQF_TIMER,
.handler = timer_interrupt,
.dev_id = &t64_clockevent_device,
};
void __init timer64_init(void)
{
struct clock_event_device *cd = &t64_clockevent_device;
......@@ -238,7 +231,9 @@ void __init timer64_init(void)
cd->cpumask = cpumask_of(smp_processor_id());
clockevents_register_device(cd);
setup_irq(cd->irq, &timer_iact);
if (request_irq(cd->irq, timer_interrupt, IRQF_TIMER, "timer",
&t64_clockevent_device))
pr_err("Failed to request irq %d (timer)\n", cd->irq);
out:
of_node_put(np);
......
......@@ -114,12 +114,6 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
local_irq_restore(flags);
}
static struct irqaction ipi_intdesc = {
.handler = handle_ipi,
.flags = IRQF_TRIGGER_RISING,
.name = "ipi_handler"
};
void __init smp_prepare_boot_cpu(void)
{
}
......@@ -132,8 +126,8 @@ void __init smp_prepare_boot_cpu(void)
void start_secondary(void)
{
unsigned int cpu;
unsigned long thread_ptr;
unsigned int cpu, irq;
/* Calculate thread_info pointer from stack pointer */
__asm__ __volatile__(
......@@ -155,7 +149,10 @@ void start_secondary(void)
cpu = smp_processor_id();
setup_irq(BASE_IPI_IRQ + cpu, &ipi_intdesc);
irq = BASE_IPI_IRQ + cpu;
if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING, "ipi_handler",
NULL))
pr_err("Failed to request irq %u (ipi_handler)\n", irq);
/* Register the clock_event dummy */
setup_percpu_clockdev();
......@@ -201,7 +198,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int i;
int i, irq = BASE_IPI_IRQ;
/*
* should eventually have some sort of machine
......@@ -213,8 +210,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
set_cpu_present(i, true);
/* Also need to register the interrupts for IPI */
if (max_cpus > 1)
setup_irq(BASE_IPI_IRQ, &ipi_intdesc);
if (max_cpus > 1) {
if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING,
"ipi_handler", NULL))
pr_err("Failed to request irq %d (ipi_handler)\n", irq);
}
}
void smp_send_reschedule(int cpu)
......
......@@ -143,13 +143,6 @@ static irqreturn_t timer_interrupt(int irq, void *devid)
return IRQ_HANDLED;
}
/* This should also be pulled from devtree */
static struct irqaction rtos_timer_intdesc = {
.handler = timer_interrupt,
.flags = IRQF_TIMER | IRQF_TRIGGER_RISING,
.name = "rtos_timer"
};
/*
* time_init_deferred - called by start_kernel to set up timer/clock source
*
......@@ -163,6 +156,7 @@ void __init time_init_deferred(void)
{
struct resource *resource = NULL;
struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
unsigned long flag = IRQF_TIMER | IRQF_TRIGGER_RISING;
ce_dev->cpumask = cpu_all_mask;
......@@ -195,7 +189,8 @@ void __init time_init_deferred(void)
#endif
clockevents_register_device(ce_dev);
setup_irq(ce_dev->irq, &rtos_timer_intdesc);
if (request_irq(ce_dev->irq, timer_interrupt, flag, "rtos_timer", NULL))
pr_err("Failed to register rtos_timer interrupt\n");
}
void __init time_init(void)
......
......@@ -47,6 +47,8 @@ config MICROBLAZE
select CPU_NO_EFFICIENT_FFS
select MMU_GATHER_NO_RANGE if MMU
select SPARSE_IRQ
select GENERIC_IRQ_MULTI_HANDLER
select HANDLE_DOMAIN_IRQ
# Endianness selection
choice
......
......@@ -14,7 +14,4 @@
struct pt_regs;
extern void do_IRQ(struct pt_regs *regs);
/* should be defined in each interrupt controller driver */
extern unsigned int xintc_get_irq(void);
#endif /* _ASM_MICROBLAZE_IRQ_H */
......@@ -20,29 +20,10 @@
#include <linux/irqchip.h>
#include <linux/of_irq.h>
static u32 concurrent_irq;
void __irq_entry do_IRQ(struct pt_regs *regs)
{
unsigned int irq;
struct pt_regs *old_regs = set_irq_regs(regs);
trace_hardirqs_off();
irq_enter();
irq = xintc_get_irq();
next_irq:
BUG_ON(!irq);
generic_handle_irq(irq);
irq = xintc_get_irq();
if (irq != -1U) {
pr_debug("next irq: %d\n", irq);
++concurrent_irq;
goto next_irq;
}
irq_exit();
set_irq_regs(old_regs);
handle_arch_irq(regs);
trace_hardirqs_on();
}
......
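
Selecting GENERIC_IRQ_MULTI_HANDLER means the architecture no longer needs to know which controller to poll: the root irqchip driver registers its top-level handler with set_handle_irq() at probe time (as the rewritten Xilinx driver does below), and the arch entry code dispatches blindly through handle_arch_irq(). A sketch of the two halves, with a hypothetical pending-register helper:

/* Root irqchip driver: claim the architecture's IRQ entry point. */
static void my_intc_handle_irq(struct pt_regs *regs)
{
	u32 hwirq = my_intc_read_pending();	/* hypothetical helper */

	handle_domain_irq(my_root_domain, hwirq, regs);
}

/* At init: */
set_handle_irq(my_intc_handle_irq);

/* The arch entry shrinks to a single dispatch, as above. */
void __irq_entry do_IRQ(struct pt_regs *regs)
{
	trace_hardirqs_off();
	handle_arch_irq(regs);
	trace_hardirqs_on();
}
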
......@@ -157,5 +157,5 @@ void __init trap_init(void)
/* Set the exception vector address */
csr_write(CSR_TVEC, &handle_exception);
/* Enable interrupts */
csr_write(CSR_IE, IE_SIE | IE_EIE);
csr_write(CSR_IE, IE_SIE);
}
......@@ -40,16 +40,6 @@ static irqreturn_t cayman_interrupt_pci2(int irq, void *dev_id)
return IRQ_NONE;
}
static struct irqaction cayman_action_smsc = {
.name = "Cayman SMSC Mux",
.handler = cayman_interrupt_smsc,
};
static struct irqaction cayman_action_pci2 = {
.name = "Cayman PCI2 Mux",
.handler = cayman_interrupt_pci2,
};
static void enable_cayman_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
......@@ -149,6 +139,10 @@ void init_cayman_irq(void)
}
/* Setup the SMSC interrupt */
setup_irq(SMSC_IRQ, &cayman_action_smsc);
setup_irq(PCI2_IRQ, &cayman_action_pci2);
if (request_irq(SMSC_IRQ, cayman_interrupt_smsc, 0, "Cayman SMSC Mux",
NULL))
pr_err("Failed to register Cayman SMSC Mux interrupt\n");
if (request_irq(PCI2_IRQ, cayman_interrupt_pci2, 0, "Cayman PCI2 Mux",
NULL))
pr_err("Failed to register Cayman PCI2 Mux interrupt\n");
}
......@@ -64,11 +64,6 @@ static int pvr2_xfer_dma(struct dma_channel *chan)
return 0;
}
static struct irqaction pvr2_dma_irq = {
.name = "pvr2 DMA handler",
.handler = pvr2_dma_interrupt,
};
static struct dma_ops pvr2_dma_ops = {
.request = pvr2_request_dma,
.get_residue = pvr2_get_dma_residue,
......@@ -84,7 +79,9 @@ static struct dma_info pvr2_dma_info = {
static int __init pvr2_dma_init(void)
{
setup_irq(HW_EVENT_PVR2_DMA, &pvr2_dma_irq);
if (request_irq(HW_EVENT_PVR2_DMA, pvr2_dma_interrupt, 0,
"pvr2 DMA handler", NULL))
pr_err("Failed to register pvr2 DMA handler interrupt\n");
request_dma(PVR2_CASCADE_CHAN, "pvr2 cascade");
return register_dmac(&pvr2_dma_info);
......
......@@ -72,13 +72,6 @@ static struct clocksource cksrc_puv3_oscr = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct irqaction puv3_timer_irq = {
.name = "ost0",
.flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = puv3_ost0_interrupt,
.dev_id = &ckevt_puv3_osmr0,
};
void __init time_init(void)
{
writel(0, OST_OIER); /* disable any timer interrupts */
......@@ -94,7 +87,9 @@ void __init time_init(void)
ckevt_puv3_osmr0.min_delta_ticks = MIN_OSCR_DELTA * 2;
ckevt_puv3_osmr0.cpumask = cpumask_of(0);
setup_irq(IRQ_TIMER0, &puv3_timer_irq);
if (request_irq(IRQ_TIMER0, puv3_ost0_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "ost0", &ckevt_puv3_osmr0))
pr_err("Failed to register ost0 interrupt\n");
clocksource_register_hz(&cksrc_puv3_oscr, CLOCK_TICK_RATE);
clockevents_register_device(&ckevt_puv3_osmr0);
......
......@@ -128,6 +128,7 @@ config X86
select GENERIC_GETTIMEOFDAY
select GENERIC_VDSO_TIME_NS
select GUP_GET_PTE_LOW_HIGH if X86_PAE
select HARDIRQS_SW_RESEND
select HARDLOCKUP_CHECK_TIMESTAMP if X86_64
select HAVE_ACPI_APEI if ACPI
select HAVE_ACPI_APEI_NMI if ACPI
......
......@@ -556,6 +556,12 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
irqd->chip_data = apicd;
irqd->hwirq = virq + i;
irqd_set_single_target(irqd);
/*
* Prevent any of these interrupts from being invoked in
* non-interrupt context via e.g. generic_handle_irq(),
* as that can corrupt the affinity move state.
*/
irqd_set_handle_enforce_irqctx(irqd);
/*
* Legacy vectors are already assigned when the IOAPIC
* takes them over. They stay on the same vector. This is
......
......@@ -458,7 +458,7 @@ config IMX_IRQSTEER
Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
config IMX_INTMUX
def_bool y if ARCH_MXC
def_bool y if ARCH_MXC || COMPILE_TEST
select IRQ_DOMAIN
help
Support for the i.MX INTMUX interrupt multiplexer.
......
......@@ -83,7 +83,7 @@ static int aic_retrigger(struct irq_data *d)
irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
irq_gc_unlock(gc);
return 0;
return 1;
}
static int aic_set_type(struct irq_data *d, unsigned type)
......
......@@ -128,7 +128,7 @@ static int aic5_retrigger(struct irq_data *d)
irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
irq_gc_unlock(bgc);
return 0;
return 1;
}
static int aic5_set_type(struct irq_data *d, unsigned type)
......
......@@ -61,6 +61,7 @@
| SHORTCUT1_MASK | SHORTCUT2_MASK)
#define REG_FIQ_CONTROL 0x0c
#define FIQ_CONTROL_ENABLE BIT(7)
#define NR_BANKS 3
#define IRQS_PER_BANK 32
......@@ -135,6 +136,7 @@ static int __init armctrl_of_init(struct device_node *node,
{
void __iomem *base;
int irq, b, i;
u32 reg;
base = of_iomap(node, 0);
if (!base)
......@@ -157,6 +159,19 @@ static int __init armctrl_of_init(struct device_node *node,
handle_level_irq);
irq_set_probe(irq);
}
reg = readl_relaxed(intc.enable[b]);
if (reg) {
writel_relaxed(reg, intc.disable[b]);
pr_err(FW_BUG "Bootloader left irq enabled: "
"bank %d irq %*pbl\n", b, IRQS_PER_BANK, &reg);
}
}
reg = readl_relaxed(base + REG_FIQ_CONTROL);
if (reg & FIQ_CONTROL_ENABLE) {
writel_relaxed(0, base + REG_FIQ_CONTROL);
pr_err(FW_BUG "Bootloader left fiq enabled\n");
}
if (is_2836) {
......
......@@ -50,7 +50,7 @@ struct bcm7038_l1_chip {
struct bcm7038_l1_cpu {
void __iomem *map_base;
u32 mask_cache[0];
u32 mask_cache[];
};
/*
......
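
The [0] -> [] change (the qcom-irq-combiner hunk near the end of this diff gets the same treatment) replaces the old GNU zero-length-array trick with a C99 flexible array member, letting the compiler and fortify checks see that the trailing storage has no declared bound. The allocation side is unchanged; a sketch using the struct above, with n_words standing in for the controller's mask word count:

struct bcm7038_l1_cpu *cpu;

/* struct_size() computes sizeof(*cpu) + n_words * sizeof(u32)
 * with overflow checking, the idiomatic companion to a
 * flexible array member. */
cpu = kzalloc(struct_size(cpu, mask_cache, n_words), GFP_KERNEL);
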
......@@ -724,6 +724,7 @@ static void __init gic_dist_init(void)
unsigned int i;
u64 affinity;
void __iomem *base = gic_data.dist_base;
u32 val;
/* Disable the distributor */
writel_relaxed(0, base + GICD_CTLR);
......@@ -756,9 +757,14 @@ static void __init gic_dist_init(void)
/* Now do the common stuff, and wait for the distributor to drain */
gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
pr_info("Enabling SGIs without active state\n");
val |= GICD_CTLR_nASSGIreq;
}
/* Enable distributor with ARE, Group1 */
writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
base + GICD_CTLR);
writel_relaxed(val, base + GICD_CTLR);
/*
* Set all global interrupts to the boot CPU only. ARE must be
......@@ -829,6 +835,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
typer = gic_read_typer(ptr + GICR_TYPER);
if ((typer >> 32) == aff) {
u64 offset = ptr - region->redist_base;
raw_spin_lock_init(&gic_data_rdist()->rd_lock);
gic_data_rdist_rd_base() = ptr;
gic_data_rdist()->phys_base = region->phys_base + offset;
......@@ -1609,7 +1616,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
&gic_data);
irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
gic_data.rdists.has_rvpeid = true;
gic_data.rdists.has_vlpis = true;
......@@ -1620,6 +1626,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
goto out_free;
}
irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
pr_info("Distributor has %sRange Selector support\n",
gic_data.has_rss ? "" : "no ");
......@@ -1785,6 +1793,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
gic_v3_kvm_info.vcpu = r;
gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
gic_set_kvm_info(&gic_v3_kvm_info);
}
......@@ -2100,6 +2109,7 @@ static void __init gic_acpi_setup_kvm_info(void)
}
gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
gic_set_kvm_info(&gic_v3_kvm_info);
}
......
......@@ -85,6 +85,53 @@
static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops;
static const struct irq_domain_ops *sgi_domain_ops;
static bool has_v4_1(void)
{
return !!sgi_domain_ops;
}
static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
{
char *name;
int sgi_base;
if (!has_v4_1())
return 0;
name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
if (!name)
goto err;
vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
if (!vpe->fwnode)
goto err;
kfree(name);
name = NULL;
vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
sgi_domain_ops, vpe);
if (!vpe->sgi_domain)
goto err;
sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
NUMA_NO_NODE, vpe,
false, NULL);
if (sgi_base <= 0)
goto err;
return 0;
err:
if (vpe->sgi_domain)
irq_domain_remove(vpe->sgi_domain);
if (vpe->fwnode)
irq_domain_free_fwnode(vpe->fwnode);
kfree(name);
return -ENOMEM;
}
int its_alloc_vcpu_irqs(struct its_vm *vm)
{
......@@ -112,8 +159,13 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
if (vpe_base_irq <= 0)
goto err;
for (i = 0; i < vm->nr_vpes; i++)
for (i = 0; i < vm->nr_vpes; i++) {
int ret;
vm->vpes[i]->irq = vpe_base_irq + i;
ret = its_alloc_vcpu_sgis(vm->vpes[i], i);
if (ret)
goto err;
}
return 0;
......@@ -126,8 +178,28 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
return -ENOMEM;
}
static void its_free_sgi_irqs(struct its_vm *vm)
{
int i;
if (!has_v4_1())
return;
for (i = 0; i < vm->nr_vpes; i++) {
unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0);
if (WARN_ON(!irq))
continue;
irq_domain_free_irqs(irq, 16);
irq_domain_remove(vm->vpes[i]->sgi_domain);
irq_domain_free_fwnode(vm->vpes[i]->fwnode);
}
}
void its_free_vcpu_irqs(struct its_vm *vm)
{
its_free_sgi_irqs(vm);
irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
irq_domain_remove(vm->domain);
irq_domain_free_fwnode(vm->fwnode);
......@@ -138,18 +210,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
return irq_set_vcpu_affinity(vpe->irq, info);
}
int its_schedule_vpe(struct its_vpe *vpe, bool on)
int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
{
struct irq_desc *desc = irq_to_desc(vpe->irq);
struct its_cmd_info info = { };
int ret;
WARN_ON(preemptible());
info.cmd_type = DESCHEDULE_VPE;
if (has_v4_1()) {
/* GICv4.1 can directly deal with doorbells */
info.req_db = db;
} else {
/* Undo the nested disable_irq() calls... */
while (db && irqd_irq_disabled(&desc->irq_data))
enable_irq(vpe->irq);
}
ret = its_send_vpe_cmd(vpe, &info);
if (!ret)
vpe->resident = false;
return ret;
}
int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
{
struct its_cmd_info info;
struct its_cmd_info info = { };
int ret;
WARN_ON(preemptible());
info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
info.cmd_type = SCHEDULE_VPE;
if (has_v4_1()) {
info.g0en = g0en;
info.g1en = g1en;
} else {
/* Disable the doorbell, as we're about to enter the guest */
disable_irq_nosync(vpe->irq);
}
ret = its_send_vpe_cmd(vpe, &info);
if (!ret)
vpe->resident = on;
vpe->resident = true;
return ret;
}
......@@ -216,12 +320,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
return irq_set_vcpu_affinity(irq, &info);
}
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
int its_prop_update_vsgi(int irq, u8 priority, bool group)
{
struct its_cmd_info info = {
.cmd_type = PROP_UPDATE_VSGI,
{
.priority = priority,
.group = group,
},
};
return irq_set_vcpu_affinity(irq, &info);
}
int its_init_v4(struct irq_domain *domain,
const struct irq_domain_ops *vpe_ops,
const struct irq_domain_ops *sgi_ops)
{
if (domain) {
pr_info("ITS: Enabling GICv4 support\n");
gic_domain = domain;
vpe_domain_ops = ops;
vpe_domain_ops = vpe_ops;
sgi_domain_ops = sgi_ops;
return 0;
}
......
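
On the caller side, the single its_schedule_vpe(vpe, on) toggle is split into an explicit pair so the GICv4.1-only parameters have somewhere to live; the KVM vGIC hunks at the end of this diff perform the matching conversion. In condensed form:

/* Entering the guest: make the vPE resident. On v4.1 the SGI
 * group enables ride along; on v4.0 the doorbell is masked
 * inside the call instead. */
err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);

/* Blocking/exiting: deschedule the vPE, requesting a doorbell
 * only when the vcpu needs a kick on pending VLPIs. */
err = its_make_vpe_non_resident(vpe, need_db);
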
......@@ -268,15 +268,6 @@ static void init_8259A(int auto_eoi)
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
static struct resource pic1_io_resource = {
.name = "pic1",
.start = PIC_MASTER_CMD,
......@@ -311,6 +302,10 @@ static const struct irq_domain_ops i8259A_ops = {
*/
struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
{
/*
* PIC_CASCADE_IR is the cascade interrupt to the second interrupt controller
*/
int irq = I8259A_IRQ_BASE + PIC_CASCADE_IR;
struct irq_domain *domain;
insert_resource(&ioport_resource, &pic1_io_resource);
......@@ -323,7 +318,8 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
if (!domain)
panic("Failed to add i8259 IRQ domain");
setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
pr_err("Failed to register cascade interrupt\n");
register_syscore_ops(&i8259_syscore_ops);
return domain;
}
......
......@@ -180,3 +180,4 @@ static int __init ingenic_tcu_irq_init(struct device_node *np,
IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init);
......@@ -58,11 +58,6 @@ static irqreturn_t intc_cascade(int irq, void *data)
return IRQ_HANDLED;
}
static struct irqaction intc_cascade_action = {
.handler = intc_cascade,
.name = "SoC intc cascade interrupt",
};
static int __init ingenic_intc_of_init(struct device_node *node,
unsigned num_chips)
{
......@@ -130,7 +125,9 @@ static int __init ingenic_intc_of_init(struct device_node *node,
irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
}
setup_irq(parent_irq, &intc_cascade_action);
if (request_irq(parent_irq, intc_cascade, 0,
"SoC intc cascade interrupt", NULL))
pr_err("Failed to register SoC intc cascade interrupt\n");
return 0;
out_domain_remove:
......
......@@ -461,7 +461,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
}
i->iomem = devm_ioremap(dev, io[k]->start,
resource_size(io[k]));
resource_size(io[k]));
if (!i->iomem) {
dev_err(dev, "failed to remap IOMEM\n");
ret = -ENXIO;
......
......@@ -4,6 +4,7 @@
* Copyright (C) 2018 Christoph Hellwig
*/
#define pr_fmt(fmt) "plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
......@@ -55,7 +56,14 @@
#define CONTEXT_THRESHOLD 0x00
#define CONTEXT_CLAIM 0x04
static void __iomem *plic_regs;
#define PLIC_DISABLE_THRESHOLD 0xf
#define PLIC_ENABLE_THRESHOLD 0
struct plic_priv {
struct cpumask lmask;
struct irq_domain *irqdomain;
void __iomem *regs;
};
struct plic_handler {
bool present;
......@@ -66,6 +74,7 @@ struct plic_handler {
*/
raw_spinlock_t enable_lock;
void __iomem *enable_base;
struct plic_priv *priv;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
......@@ -84,31 +93,40 @@ static inline void plic_toggle(struct plic_handler *handler,
}
static inline void plic_irq_toggle(const struct cpumask *mask,
int hwirq, int enable)
struct irq_data *d, int enable)
{
int cpu;
struct plic_priv *priv = irq_get_chip_data(d->irq);
writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
for_each_cpu(cpu, mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present)
plic_toggle(handler, hwirq, enable);
if (handler->present &&
cpumask_test_cpu(cpu, &handler->priv->lmask))
plic_toggle(handler, d->hwirq, enable);
}
}
static void plic_irq_unmask(struct irq_data *d)
{
unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
cpu_online_mask);
struct cpumask amask;
unsigned int cpu;
struct plic_priv *priv = irq_get_chip_data(d->irq);
cpumask_and(&amask, &priv->lmask, cpu_online_mask);
cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
&amask);
if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
return;
plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
plic_irq_toggle(cpumask_of(cpu), d, 1);
}
static void plic_irq_mask(struct irq_data *d)
{
plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
struct plic_priv *priv = irq_get_chip_data(d->irq);
plic_irq_toggle(&priv->lmask, d, 0);
}
#ifdef CONFIG_SMP
......@@ -116,17 +134,21 @@ static int plic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
unsigned int cpu;
struct cpumask amask;
struct plic_priv *priv = irq_get_chip_data(d->irq);
cpumask_and(&amask, &priv->lmask, mask_val);
if (force)
cpu = cpumask_first(mask_val);
cpu = cpumask_first(&amask);
else
cpu = cpumask_any_and(mask_val, cpu_online_mask);
cpu = cpumask_any_and(&amask, cpu_online_mask);
if (cpu >= nr_cpu_ids)
return -EINVAL;
plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
plic_irq_toggle(&priv->lmask, d, 0);
plic_irq_toggle(cpumask_of(cpu), d, 1);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
......@@ -187,8 +209,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
.free = irq_domain_free_irqs_top,
};
static struct irq_domain *plic_irqdomain;
/*
* Handling an interrupt is a two-step process: first you claim the interrupt
* by reading the claim register, then you complete the interrupt by writing
......@@ -205,7 +225,7 @@ static void plic_handle_irq(struct pt_regs *regs)
csr_clear(CSR_IE, IE_EIE);
while ((hwirq = readl(claim))) {
int irq = irq_find_mapping(plic_irqdomain, hwirq);
int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
if (unlikely(irq <= 0))
pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
......@@ -230,20 +250,48 @@ static int plic_find_hart_id(struct device_node *node)
return -1;
}
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
/* priority must be > threshold to trigger an interrupt */
writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}
static int plic_dying_cpu(unsigned int cpu)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
csr_clear(CSR_IE, IE_EIE);
plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
return 0;
}
static int plic_starting_cpu(unsigned int cpu)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
csr_set(CSR_IE, IE_EIE);
plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
return 0;
}
static int __init plic_init(struct device_node *node,
struct device_node *parent)
{
int error = 0, nr_contexts, nr_handlers = 0, i;
u32 nr_irqs;
struct plic_priv *priv;
if (plic_regs) {
pr_warn("PLIC already present.\n");
return -ENXIO;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
plic_regs = of_iomap(node, 0);
if (WARN_ON(!plic_regs))
return -EIO;
priv->regs = of_iomap(node, 0);
if (WARN_ON(!priv->regs)) {
error = -EIO;
goto out_free_priv;
}
error = -EINVAL;
of_property_read_u32(node, "riscv,ndev", &nr_irqs);
......@@ -257,9 +305,9 @@ static int __init plic_init(struct device_node *node,
goto out_iounmap;
error = -ENOMEM;
plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
&plic_irqdomain_ops, NULL);
if (WARN_ON(!plic_irqdomain))
priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
&plic_irqdomain_ops, priv);
if (WARN_ON(!priv->irqdomain))
goto out_iounmap;
for (i = 0; i < nr_contexts; i++) {
......@@ -267,7 +315,6 @@ static int __init plic_init(struct device_node *node,
struct plic_handler *handler;
irq_hw_number_t hwirq;
int cpu, hartid;
u32 threshold = 0;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
......@@ -301,32 +348,36 @@ static int __init plic_init(struct device_node *node,
handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present) {
pr_warn("handler already present for context %d.\n", i);
threshold = 0xffffffff;
plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
goto done;
}
cpumask_set_cpu(cpu, &priv->lmask);
handler->present = true;
handler->hart_base =
plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
raw_spin_lock_init(&handler->enable_lock);
handler->enable_base =
plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
handler->priv = priv;
done:
/* priority must be > threshold to trigger an interrupt */
writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
plic_toggle(handler, hwirq, 0);
nr_handlers++;
}
cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
plic_starting_cpu, plic_dying_cpu);
pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
nr_irqs, nr_handlers, nr_contexts);
set_handle_irq(plic_handle_irq);
return 0;
out_iounmap:
iounmap(plic_regs);
iounmap(priv->regs);
out_free_priv:
kfree(priv);
return error;
}
......
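
The hotplug support leans on the PLIC's per-context claim threshold: an interrupt is delivered to a hart only if its priority is strictly greater than the threshold, so writing the maximum threshold (0xf) effectively detaches a CPU and writing 0 re-attaches it. The starting/dying callbacks above are wired to the new CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING state (added to enum cpuhp_state further down):

/* "starting" runs on a CPU coming up, "dying" on one going down;
 * both execute on that CPU with interrupts disabled, which is why
 * the callbacks can use this_cpu_ptr(&plic_handlers). */
cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
		  "irqchip/sifive/plic:starting",
		  plic_starting_cpu, plic_dying_cpu);
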
......@@ -604,12 +604,24 @@ static void stm32_exti_h_syscore_deinit(void)
unregister_syscore_ops(&stm32_exti_h_syscore_ops);
}
static int stm32_exti_h_retrigger(struct irq_data *d)
{
struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
void __iomem *base = chip_data->host_data->base;
u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
writel_relaxed(mask, base + stm32_bank->swier_ofst);
return 0;
}
static struct irq_chip stm32_exti_h_chip = {
.name = "stm32-exti-h",
.irq_eoi = stm32_exti_h_eoi,
.irq_mask = stm32_exti_h_mask,
.irq_unmask = stm32_exti_h_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_retrigger = stm32_exti_h_retrigger,
.irq_set_type = stm32_exti_h_set_type,
.irq_set_wake = stm32_exti_h_set_wake,
.flags = IRQCHIP_MASK_ON_SUSPEND,
......
......@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
......@@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d)
static void fpga_irq_handle(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
u32 status = readl(f->base + IRQ_STATUS);
u32 status;
chained_irq_enter(chip, desc);
status = readl(f->base + IRQ_STATUS);
if (status == 0) {
do_bad_IRQ(desc);
return;
goto out;
}
do {
......@@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc)
status &= ~(1 << irq);
generic_handle_irq(irq_find_mapping(f->domain, irq));
} while (status);
out:
chained_irq_exit(chip, desc);
}
/*
......@@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node,
if (of_property_read_u32(node, "valid-mask", &valid_mask))
valid_mask = 0;
writel(clear_mask, base + IRQ_ENABLE_CLEAR);
writel(clear_mask, base + FIQ_ENABLE_CLEAR);
/* Some chips are cascaded from a parent IRQ */
parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
......@@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node,
fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
writel(clear_mask, base + IRQ_ENABLE_CLEAR);
writel(clear_mask, base + FIQ_ENABLE_CLEAR);
/*
* On Versatile AB/PB, some secondary interrupts have a direct
* pass-thru to the primary controller for IRQs 20 and 22-31 which need
......
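
The bug fixed here is a classic one for chained (muxed) interrupt handlers: every path through the demux loop, including the spurious-status early exit, must be bracketed by chained_irq_enter()/chained_irq_exit() so the parent chip's ack/mask and eoi/unmask callbacks run; otherwise a fasteoi parent is never EOIed and the whole mux wedges. The canonical shape, sketched with a hypothetical status register:

static void my_mux_handle(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct my_mux *mux = irq_desc_get_handler_data(desc);
	u32 status;

	chained_irq_enter(chip, desc);		/* ack/mask the parent */

	status = readl(mux->base + MY_STATUS);	/* made-up register */
	while (status) {
		int hwirq = __ffs(status);

		status &= ~BIT(hwirq);
		generic_handle_irq(irq_find_mapping(mux->domain, hwirq));
	}

	chained_irq_exit(chip, desc);		/* eoi/unmask the parent */
}
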
......@@ -509,9 +509,7 @@ static int __init vic_of_init(struct device_node *node,
void __iomem *regs;
u32 interrupt_mask = ~0;
u32 wakeup_mask = ~0;
if (WARN(parent, "non-root VICs are not supported"))
return -EINVAL;
int parent_irq;
regs = of_iomap(node, 0);
if (WARN_ON(!regs))
......@@ -519,11 +517,14 @@ static int __init vic_of_init(struct device_node *node,
of_property_read_u32(node, "valid-mask", &interrupt_mask);
of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
parent_irq = of_irq_get(node, 0);
if (parent_irq < 0)
parent_irq = 0;
/*
* Passing 0 as first IRQ makes the simple domain allocate descriptors
*/
__vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node);
__vic_init(regs, parent_irq, 0, interrupt_mask, wakeup_mask, node);
return 0;
}
......
......@@ -38,29 +38,31 @@ struct xintc_irq_chip {
void __iomem *base;
struct irq_domain *root_domain;
u32 intr_mask;
u32 nr_irq;
};
static struct xintc_irq_chip *xintc_irqc;
static struct xintc_irq_chip *primary_intc;
static void xintc_write(int reg, u32 data)
static void xintc_write(struct xintc_irq_chip *irqc, int reg, u32 data)
{
if (static_branch_unlikely(&xintc_is_be))
iowrite32be(data, xintc_irqc->base + reg);
iowrite32be(data, irqc->base + reg);
else
iowrite32(data, xintc_irqc->base + reg);
iowrite32(data, irqc->base + reg);
}
static unsigned int xintc_read(int reg)
static u32 xintc_read(struct xintc_irq_chip *irqc, int reg)
{
if (static_branch_unlikely(&xintc_is_be))
return ioread32be(xintc_irqc->base + reg);
return ioread32be(irqc->base + reg);
else
return ioread32(xintc_irqc->base + reg);
return ioread32(irqc->base + reg);
}
static void intc_enable_or_unmask(struct irq_data *d)
{
unsigned long mask = 1 << d->hwirq;
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
unsigned long mask = BIT(d->hwirq);
pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
......@@ -69,30 +71,35 @@ static void intc_enable_or_unmask(struct irq_data *d)
* acks the irq before calling the interrupt handler
*/
if (irqd_is_level_type(d))
xintc_write(IAR, mask);
xintc_write(irqc, IAR, mask);
xintc_write(SIE, mask);
xintc_write(irqc, SIE, mask);
}
static void intc_disable_or_mask(struct irq_data *d)
{
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
xintc_write(CIE, 1 << d->hwirq);
xintc_write(irqc, CIE, BIT(d->hwirq));
}
static void intc_ack(struct irq_data *d)
{
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
xintc_write(IAR, 1 << d->hwirq);
xintc_write(irqc, IAR, BIT(d->hwirq));
}
static void intc_mask_ack(struct irq_data *d)
{
unsigned long mask = 1 << d->hwirq;
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
unsigned long mask = BIT(d->hwirq);
pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
xintc_write(CIE, mask);
xintc_write(IAR, mask);
xintc_write(irqc, CIE, mask);
xintc_write(irqc, IAR, mask);
}
static struct irq_chip intc_dev = {
......@@ -103,13 +110,14 @@ static struct irq_chip intc_dev = {
.irq_mask_ack = intc_mask_ack,
};
unsigned int xintc_get_irq(void)
static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc)
{
unsigned int hwirq, irq = -1;
unsigned int irq = 0;
u32 hwirq;
hwirq = xintc_read(IVR);
hwirq = xintc_read(irqc, IVR);
if (hwirq != -1U)
irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
irq = irq_find_mapping(irqc->root_domain, hwirq);
pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
......@@ -118,15 +126,18 @@ unsigned int xintc_get_irq(void)
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
if (xintc_irqc->intr_mask & (1 << hw)) {
struct xintc_irq_chip *irqc = d->host_data;
if (irqc->intr_mask & BIT(hw)) {
irq_set_chip_and_handler_name(irq, &intc_dev,
handle_edge_irq, "edge");
handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL);
} else {
irq_set_chip_and_handler_name(irq, &intc_dev,
handle_level_irq, "level");
handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL);
}
irq_set_chip_data(irq, irqc);
return 0;
}
......@@ -138,43 +149,55 @@ static const struct irq_domain_ops xintc_irq_domain_ops = {
static void xil_intc_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct xintc_irq_chip *irqc;
u32 pending;
irqc = irq_data_get_irq_handler_data(&desc->irq_data);
chained_irq_enter(chip, desc);
do {
pending = xintc_get_irq();
if (pending == -1U)
pending = xintc_get_irq_local(irqc);
if (pending == 0)
break;
generic_handle_irq(pending);
} while (true);
chained_irq_exit(chip, desc);
}
static void xil_intc_handle_irq(struct pt_regs *regs)
{
u32 hwirq;
struct xintc_irq_chip *irqc = primary_intc;
do {
hwirq = xintc_read(irqc, IVR);
if (likely(hwirq != -1U)) {
int ret;
ret = handle_domain_irq(irqc->root_domain, hwirq, regs);
WARN_ONCE(ret, "Unhandled HWIRQ %d\n", hwirq);
continue;
}
break;
} while (1);
}
static int __init xilinx_intc_of_init(struct device_node *intc,
struct device_node *parent)
{
u32 nr_irq;
int ret, irq;
struct xintc_irq_chip *irqc;
if (xintc_irqc) {
pr_err("irq-xilinx: Multiple instances aren't supported\n");
return -EINVAL;
}
int ret, irq;
irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
if (!irqc)
return -ENOMEM;
xintc_irqc = irqc;
irqc->base = of_iomap(intc, 0);
BUG_ON(!irqc->base);
ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq);
if (ret < 0) {
pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
goto err_alloc;
goto error;
}
ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
......@@ -183,34 +206,35 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
irqc->intr_mask = 0;
}
if (irqc->intr_mask >> nr_irq)
if (irqc->intr_mask >> irqc->nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
intc, nr_irq, irqc->intr_mask);
intc, irqc->nr_irq, irqc->intr_mask);
/*
* Disable all external interrupts until they are
* explicitly requested.
*/
xintc_write(IER, 0);
xintc_write(irqc, IER, 0);
/* Acknowledge any pending interrupts just in case. */
xintc_write(IAR, 0xffffffff);
xintc_write(irqc, IAR, 0xffffffff);
/* Turn on the Master Enable. */
xintc_write(MER, MER_HIE | MER_ME);
if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
xintc_write(irqc, MER, MER_HIE | MER_ME);
if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) {
static_branch_enable(&xintc_is_be);
xintc_write(MER, MER_HIE | MER_ME);
xintc_write(irqc, MER, MER_HIE | MER_ME);
}
irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
&xintc_irq_domain_ops, irqc);
if (!irqc->root_domain) {
pr_err("irq-xilinx: Unable to create IRQ domain\n");
goto err_alloc;
ret = -EINVAL;
goto error;
}
if (parent) {
......@@ -222,16 +246,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
} else {
pr_err("irq-xilinx: interrupts property not in DT\n");
ret = -EINVAL;
goto err_alloc;
goto error;
}
} else {
irq_set_default_host(irqc->root_domain);
primary_intc = irqc;
set_handle_irq(xil_intc_handle_irq);
}
return 0;
err_alloc:
xintc_irqc = NULL;
error:
iounmap(irqc->base);
kfree(irqc);
return ret;
......
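
Multi-instance support follows the standard irqchip recipe, visible throughout the rewrite above: drop the file-scope singleton, pass the per-controller structure as the irq_domain's host_data, and mirror it into each descriptor with irq_set_chip_data() at map time so every irq_chip callback can recover its own instance. Sketched against the driver's own helpers:

static void my_mask(struct irq_data *d)
{
	/* Stored by the ->map() callback via irq_set_chip_data();
	 * no global state, so any number of instances coexist. */
	struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);

	xintc_write(irqc, CIE, BIT(d->hwirq));
}

Only the primary controller (the one without a parent) keeps a single static pointer, because the architecture-level xil_intc_handle_irq() entry has no irq_data to hang the lookup on.
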
......@@ -33,7 +33,7 @@ struct combiner {
int parent_irq;
u32 nirqs;
u32 nregs;
struct combiner_reg regs[0];
struct combiner_reg regs[];
};
static inline int irq_nr(u32 reg, u32 bit)
......
......@@ -34,6 +34,7 @@ config PCIEAER
config PCIEAER_INJECT
tristate "PCI Express error injection support"
depends on PCIEAER
select GENERIC_IRQ_INJECTION
help
This enables PCI Express Root Port Advanced Error Reporting
(AER) software error injector.
......
......@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/slab.h>
......@@ -468,9 +468,7 @@ static int aer_inject(struct aer_error_inj *einj)
}
pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n",
einj->cor_status, einj->uncor_status, pci_name(dev));
local_irq_disable();
generic_handle_irq(edev->irq);
local_irq_enable();
ret = irq_inject_interrupt(edev->irq);
} else {
pci_err(rpdev, "AER device not found\n");
ret = -ENODEV;
......
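
This is the consumer-side counterpart of the core changes below: calling generic_handle_irq() from task context with interrupts disabled is exactly the unsafe invocation the new IRQD_HANDLE_ENFORCE_IRQCTX check rejects, and on x86 it could corrupt an affinity move in flight. The conversion shape:

/* Before: run the handler directly from process context. */
local_irq_disable();
generic_handle_irq(edev->irq);
local_irq_enable();

/* After: route through the injection helper, which first tries
 * the irqchip's PENDING state and falls back to a resend. */
ret = irq_inject_interrupt(edev->irq);
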
......@@ -92,6 +92,7 @@ struct stm32_gpio_bank {
u32 bank_nr;
u32 bank_ioport_nr;
u32 pin_backup[STM32_GPIO_PINS_PER_BANK];
u8 irq_type[STM32_GPIO_PINS_PER_BANK];
};
struct stm32_pinctrl {
......@@ -303,6 +304,50 @@ static const struct gpio_chip stm32_gpio_template = {
.get_direction = stm32_gpio_get_direction,
};
static void stm32_gpio_irq_trigger(struct irq_data *d)
{
struct stm32_gpio_bank *bank = d->domain->host_data;
int level;
/* If level interrupt type then retrig */
level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
(level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
irq_chip_retrigger_hierarchy(d);
}
static void stm32_gpio_irq_eoi(struct irq_data *d)
{
irq_chip_eoi_parent(d);
stm32_gpio_irq_trigger(d);
};
static int stm32_gpio_set_type(struct irq_data *d, unsigned int type)
{
struct stm32_gpio_bank *bank = d->domain->host_data;
u32 parent_type;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
parent_type = type;
break;
case IRQ_TYPE_LEVEL_HIGH:
parent_type = IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_LEVEL_LOW:
parent_type = IRQ_TYPE_EDGE_FALLING;
break;
default:
return -EINVAL;
}
bank->irq_type[d->hwirq] = type;
return irq_chip_set_type_parent(d, parent_type);
};
static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
{
struct stm32_gpio_bank *bank = irq_data->domain->host_data;
......@@ -330,13 +375,19 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
}
static void stm32_gpio_irq_unmask(struct irq_data *d)
{
irq_chip_unmask_parent(d);
stm32_gpio_irq_trigger(d);
}
static struct irq_chip stm32_gpio_irq_chip = {
.name = "stm32gpio",
.irq_eoi = irq_chip_eoi_parent,
.irq_eoi = stm32_gpio_irq_eoi,
.irq_ack = irq_chip_ack_parent,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_set_type = irq_chip_set_type_parent,
.irq_unmask = stm32_gpio_irq_unmask,
.irq_set_type = stm32_gpio_set_type,
.irq_set_wake = irq_chip_set_wake_parent,
.irq_request_resources = stm32_gpio_irq_request_resources,
.irq_release_resources = stm32_gpio_irq_release_resources,
......
......@@ -70,6 +70,7 @@ struct vgic_global {
/* Hardware has GICv4? */
bool has_gicv4;
bool has_gicv4_1;
/* GIC system register CPU interface */
struct static_key_false gicv3_cpuif;
......
......@@ -102,6 +102,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
......
......@@ -248,6 +248,8 @@ extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);
extern int irq_inject_interrupt(unsigned int irq);
/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
......
......@@ -211,6 +211,8 @@ struct irq_data {
* IRQD_CAN_RESERVE - Can use reservation mode
* IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
* required
* IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
* from actual interrupt context.
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
......@@ -234,6 +236,7 @@ enum {
IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
IRQD_CAN_RESERVE = (1 << 26),
IRQD_MSI_NOMASK_QUIRK = (1 << 27),
IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
......@@ -303,6 +306,16 @@ static inline bool irqd_is_single_target(struct irq_data *d)
return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
}
static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d)
{
__irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX;
}
static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX;
}
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
......
......@@ -32,6 +32,8 @@ struct gic_kvm_info {
struct resource vctrl;
/* vlpi support */
bool has_v4;
/* rvpeid support */
bool has_v4_1;
};
const struct gic_kvm_info *gic_get_kvm_info(void);
......
......@@ -57,6 +57,7 @@
#define GICD_SPENDSGIR 0x0F20
#define GICD_CTLR_RWP (1U << 31)
#define GICD_CTLR_nASSGIreq (1U << 8)
#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
......@@ -90,6 +91,7 @@
#define GICD_TYPER_ESPIS(typer) \
(((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
#define GICD_TYPER2_nASSGIcap (1U << 8)
#define GICD_TYPER2_VIL (1U << 7)
#define GICD_TYPER2_VID GENMASK(4, 0)
......@@ -320,6 +322,9 @@
#define GICR_VPENDBASER_NonShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
#define GICR_VPENDBASER_InnerShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable)
#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
......@@ -343,6 +348,15 @@
#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58)
#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0)
#define GICR_VSGIR 0x0080
#define GICR_VSGIR_VPEID GENMASK(15, 0)
#define GICR_VSGIPENDR 0x0088
#define GICR_VSGIPENDR_BUSY (1U << 31)
#define GICR_VSGIPENDR_PENDING GENMASK(15, 0)
/*
* ITS registers, offsets from ITS_base
*/
......@@ -366,6 +380,11 @@
#define GITS_TRANSLATER 0x10040
#define GITS_SGIR 0x20020
#define GITS_SGIR_VPEID GENMASK_ULL(47, 32)
#define GITS_SGIR_VINTID GENMASK_ULL(3, 0)
#define GITS_CTLR_ENABLE (1U << 0)
#define GITS_CTLR_ImDe (1U << 1)
#define GITS_CTLR_ITS_NUMBER_SHIFT 4
......@@ -500,8 +519,9 @@
#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
/* VMOVP and INVDB are the odd ones, as they don't have a physical counterpart */
/* VMOVP, VSGI and INVDB are the odd ones, as they don't have a physical counterpart */
#define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
#define GITS_CMD_VSGI GITS_CMD_GICv4(3)
#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe)
/*
......@@ -650,6 +670,7 @@
struct rdists {
struct {
raw_spinlock_t rd_lock;
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
......
......@@ -49,10 +49,22 @@ struct its_vpe {
};
/* GICv4.1 implementations */
struct {
struct fwnode_handle *fwnode;
struct irq_domain *sgi_domain;
struct {
u8 priority;
bool enabled;
bool group;
} sgi_config[16];
atomic_t vmapp_count;
};
};
/*
* Ensures mutual exclusion between affinity setting of the
* vPE and vLPI operations using vpe->col_idx.
*/
raw_spinlock_t vpe_lock;
/*
* This collection ID is used to indirect the target
* redistributor for this VPE. The ID itself isn't involved in
......@@ -93,6 +105,7 @@ enum its_vcpu_info_cmd_type {
SCHEDULE_VPE,
DESCHEDULE_VPE,
INVALL_VPE,
PROP_UPDATE_VSGI,
};
struct its_cmd_info {
......@@ -105,19 +118,27 @@ struct its_cmd_info {
bool g0en;
bool g1en;
};
struct {
u8 priority;
bool group;
};
};
};
int its_alloc_vcpu_irqs(struct its_vm *vm);
void its_free_vcpu_irqs(struct its_vm *vm);
int its_schedule_vpe(struct its_vpe *vpe, bool on);
int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
int its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
int its_prop_update_vsgi(int irq, u8 priority, bool group);
struct irq_domain_ops;
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
int its_init_v4(struct irq_domain *domain,
const struct irq_domain_ops *vpe_ops,
const struct irq_domain_ops *sgi_ops);
#endif
......@@ -43,6 +43,10 @@ config GENERIC_IRQ_MIGRATION
config AUTO_IRQ_AFFINITY
bool
# Interrupt injection mechanism
config GENERIC_IRQ_INJECTION
bool
# Tasklet based software resend for pending interrupts on enable_irq()
config HARDIRQS_SW_RESEND
bool
......@@ -127,6 +131,7 @@ config SPARSE_IRQ
config GENERIC_IRQ_DEBUGFS
bool "Expose irq internals in debugfs"
depends on DEBUG_FS
select GENERIC_IRQ_INJECTION
default n
---help---
......
......@@ -278,7 +278,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
}
}
if (resend)
check_irq_resend(desc);
check_irq_resend(desc, false);
return ret;
}
......
......@@ -190,33 +190,7 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
return -EFAULT;
if (!strncmp(buf, "trigger", size)) {
unsigned long flags;
int err;
/* Try the HW interface first */
err = irq_set_irqchip_state(irq_desc_get_irq(desc),
IRQCHIP_STATE_PENDING, true);
if (!err)
return count;
/*
* Otherwise, try to inject via the resend interface,
* which may or may not succeed.
*/
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
if (irq_settings_is_level(desc) || desc->istate & IRQS_NMI) {
/* Can't do level nor NMIs, sorry */
err = -EINVAL;
} else {
desc->istate |= IRQS_PENDING;
check_irq_resend(desc);
err = 0;
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(desc);
int err = irq_inject_interrupt(irq_desc_get_irq(desc));
return err ? err : count;
}
......
......@@ -108,7 +108,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);
/* Resending of interrupts :*/
void check_irq_resend(struct irq_desc *desc);
int check_irq_resend(struct irq_desc *desc, bool inject);
bool irq_wait_for_poll(struct irq_desc *desc);
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
......@@ -425,6 +425,10 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
{
return desc->pending_mask;
}
static inline bool handle_enforce_irqctx(struct irq_data *data)
{
return irqd_is_handle_enforce_irqctx(data);
}
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
#else /* CONFIG_GENERIC_PENDING_IRQ */
static inline bool irq_can_move_pcntxt(struct irq_data *data)
......@@ -451,6 +455,10 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
{
return false;
}
static inline bool handle_enforce_irqctx(struct irq_data *data)
{
return false;
}
#endif /* !CONFIG_GENERIC_PENDING_IRQ */
#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
......
......@@ -638,9 +638,15 @@ void irq_init_desc(unsigned int irq)
int generic_handle_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irq_data *data;
if (!desc)
return -EINVAL;
data = irq_desc_get_irq_data(desc);
if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data)))
return -EPERM;
generic_handle_irq_desc(desc);
return 0;
}
......
......@@ -46,11 +46,11 @@ const struct fwnode_operations irqchip_fwnode_ops;
EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
/**
* irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
* __irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
* identifying an irq domain
* @type: Type of irqchip_fwnode. See linux/irqdomain.h
* @name: Optional user provided domain name
* @id: Optional user provided id if name != NULL
* @name: Optional user provided domain name
* @pa: Optional user-provided physical address
*
* Allocate a struct irqchip_fwid, and return a pointer to the embedded
......@@ -1310,6 +1310,11 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs, void *arg)
{
if (!domain->ops->alloc) {
pr_debug("domain->ops->alloc() is NULL\n");
return -ENOSYS;
}
return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
}
......@@ -1347,11 +1352,6 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
return -EINVAL;
}
if (!domain->ops->alloc) {
pr_debug("domain->ops->alloc() is NULL\n");
return -ENOSYS;
}
if (realloc && irq_base >= 0) {
virq = irq_base;
} else {
......
......@@ -47,6 +47,43 @@ static void resend_irqs(unsigned long arg)
/* Tasklet to handle resend: */
static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
static int irq_sw_resend(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
/*
* Validate whether this interrupt can be safely injected from
* non interrupt context
*/
if (handle_enforce_irqctx(&desc->irq_data))
return -EINVAL;
/*
* If the interrupt is running in the thread context of the parent
* irq we need to be careful, because we cannot trigger it
* directly.
*/
if (irq_settings_is_nested_thread(desc)) {
/*
* If the parent_irq is valid, we retrigger the parent,
* otherwise we do nothing.
*/
if (!desc->parent_irq)
return -EINVAL;
irq = desc->parent_irq;
}
/* Set it pending and activate the softirq: */
set_bit(irq, irqs_resend);
tasklet_schedule(&resend_tasklet);
return 0;
}
#else
static int irq_sw_resend(struct irq_desc *desc)
{
return -EINVAL;
}
#endif
/*
......@@ -54,49 +91,83 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
*
* Is called with interrupts disabled and desc->lock held.
*/
void check_irq_resend(struct irq_desc *desc)
int check_irq_resend(struct irq_desc *desc, bool inject)
{
int err = 0;
/*
* We do not resend level type interrupts. Level type
* interrupts are resent by hardware when they are still
* active. Clear the pending bit so suspend/resume does not
* get confused.
* We do not resend level type interrupts. Level type interrupts
* are resent by hardware when they are still active. Clear the
* pending bit so suspend/resume does not get confused.
*/
if (irq_settings_is_level(desc)) {
desc->istate &= ~IRQS_PENDING;
return;
return -EINVAL;
}
if (desc->istate & IRQS_REPLAY)
return;
if (desc->istate & IRQS_PENDING) {
desc->istate &= ~IRQS_PENDING;
return -EBUSY;
if (!(desc->istate & IRQS_PENDING) && !inject)
return 0;
desc->istate &= ~IRQS_PENDING;
if (!desc->irq_data.chip->irq_retrigger ||
!desc->irq_data.chip->irq_retrigger(&desc->irq_data))
err = irq_sw_resend(desc);
/* If the retrigger was successful, mark it with the REPLAY bit */
if (!err)
desc->istate |= IRQS_REPLAY;
return err;
}
if (!desc->irq_data.chip->irq_retrigger ||
!desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
#ifdef CONFIG_HARDIRQS_SW_RESEND
unsigned int irq = irq_desc_get_irq(desc);
/*
* If the interrupt is running in the thread
* context of the parent irq we need to be
* careful, because we cannot trigger it
* directly.
*/
if (irq_settings_is_nested_thread(desc)) {
/*
* If the parent_irq is valid, we
* retrigger the parent, otherwise we
* do nothing.
*/
if (!desc->parent_irq)
return;
irq = desc->parent_irq;
}
/* Set it pending and activate the softirq: */
set_bit(irq, irqs_resend);
tasklet_schedule(&resend_tasklet);
#endif
}
}
#ifdef CONFIG_GENERIC_IRQ_INJECTION
/**
* irq_inject_interrupt - Inject an interrupt for testing/error injection
* @irq: The interrupt number
*
* This function must only be used for debug and testing purposes!
*
* Especially on x86 this can cause a premature completion of an interrupt
* affinity change causing the interrupt line to become stale. Very
* unlikely, but possible.
*
* The injection can fail for various reasons:
* - Interrupt is not activated
* - Interrupt is NMI type or currently replaying
* - Interrupt is level type
* - Interrupt does not support hardware retrigger and software resend is
* either not enabled or not possible for the interrupt.
*/
int irq_inject_interrupt(unsigned int irq)
{
struct irq_desc *desc;
unsigned long flags;
int err;
/* Try the state injection hardware interface first */
if (!irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true))
return 0;
/* That failed, try via the resend mechanism */
desc = irq_get_desc_buslock(irq, &flags, 0);
if (!desc)
return -EINVAL;
/*
* Only try to inject when the interrupt is:
* - not NMI type
* - activated
*/
if ((desc->istate & IRQS_NMI) || !irqd_is_activated(&desc->irq_data))
err = -EINVAL;
else
err = check_irq_resend(desc, true);
irq_put_desc_busunlock(desc, flags);
return err;
}
EXPORT_SYMBOL_GPL(irq_inject_interrupt);
#endif
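
The injection strategy is two-stage: irq_set_irqchip_state(..., IRQCHIP_STATE_PENDING, true) lets capable hardware raise the interrupt itself, and only when that fails does the core fall through to check_irq_resend() with inject=true, which forces a retrigger or software resend even though IRQS_PENDING was never set. No test module is strictly needed either; with GENERIC_IRQ_DEBUGFS enabled, the rewritten debugfs handler above funnels a "trigger" write into this same function. From test code the call is simply:

/* Debug/test only, per the kerneldoc above: fails for NMIs,
 * level-type or not-yet-activated interrupts, and for anything
 * without a usable retrigger/resend path. */
int err = irq_inject_interrupt(my_test_irq);
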
......@@ -595,7 +595,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
/* GICv4 support? */
if (info->has_v4) {
kvm_vgic_global_state.has_gicv4 = gicv4_enable;
kvm_info("GICv4 support %sabled\n",
kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
kvm_info("GICv4%s support %sabled\n",
kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
gicv4_enable ? "en" : "dis");
}
......
......@@ -67,10 +67,10 @@
* it. And if we've migrated our vcpu from one CPU to another, we must
* tell the ITS (so that the messages reach the right redistributor).
* This is done in two steps: first issue a irq_set_affinity() on the
* irq corresponding to the vcpu, then call its_schedule_vpe(). You
* must be in a non-preemptible context. On exit, another call to
* its_schedule_vpe() tells the redistributor that we're done with the
* vcpu.
* irq corresponding to the vcpu, then call its_make_vpe_resident().
* You must be in a non-preemptible context. On exit, a call to
* its_make_vpe_non_resident() tells the redistributor that we're done
* with the vcpu.
*
* Finally, the doorbell handling: Each vcpu is allocated an interrupt
* which will fire each time a VLPI is made pending whilst the vcpu is
......@@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
struct kvm_vcpu *vcpu = info;
/* We got the message, no need to fire again */
if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
if (!kvm_vgic_global_state.has_gicv4_1 &&
!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
disable_irq_nosync(irq);
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
......@@ -199,19 +200,11 @@ void vgic_v4_teardown(struct kvm *kvm)
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
struct irq_desc *desc = irq_to_desc(vpe->irq);
if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
return 0;
/*
* If blocking, a doorbell is required. Undo the nested
* disable_irq() calls...
*/
while (need_db && irqd_irq_disabled(&desc->irq_data))
enable_irq(vpe->irq);
return its_schedule_vpe(vpe, false);
return its_make_vpe_non_resident(vpe, need_db);
}
int vgic_v4_load(struct kvm_vcpu *vcpu)
......@@ -232,18 +225,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
if (err)
return err;
/* Disabled the doorbell, as we're about to enter the guest */
disable_irq_nosync(vpe->irq);
err = its_schedule_vpe(vpe, true);
err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
if (err)
return err;
/*
* Now that the VPE is resident, let's get rid of a potential
* doorbell interrupt that would still be pending.
* doorbell interrupt that would still be pending. This is a
* GICv4.0 only "feature"...
*/
return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
if (!kvm_vgic_global_state.has_gicv4_1)
err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
return err;
}
static struct vgic_its *vgic_get_its(struct kvm *kvm,
......