Commit 8a13b02a authored by Thomas Gleixner

Merge tag 'irqchip-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core

Pull irqchip updates from Marc Zyngier:

 - Second batch of the GICv4.1 support saga
 - Level triggered interrupt support for the stm32 controller
 - Versatile-fpga chained interrupt fixes
 - DT support for cascaded VIC interrupt controller
 - RPi irqchip initialization fixes
 - Multi-instance support for the Xilinx interrupt controller
 - Multi-instance support for the PLIC interrupt controller
 - CPU hotplug support for the PLIC interrupt controller
 - Ingenic X1000 TCU support
 - Small fixes all over the shop (GICv3, GICv4, Xilinx, Atmel, sa1111)
 - Cleanups (setup_irq removal, zero-length array removal); a sketch of the setup_irq conversion follows below
parents ba947241 771df8cf
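
The setup_irq() removal mentioned above follows one pattern throughout the series (see the i8259 and Ingenic hunks further down): a statically allocated struct irqaction registered with setup_irq() is replaced by a plain request_irq() call whose return value is checked. Below is a minimal sketch of that conversion; the handler, IRQ number and names are illustrative, not taken from any of the drivers in this merge.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/printk.h>

/* Hypothetical cascade handler, for illustration only. */
static irqreturn_t example_cascade(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Old style: statically allocated irqaction wired up with setup_irq(). */
static struct irqaction example_cascade_action = {
	.handler = example_cascade,
	.name    = "example cascade",
	.flags   = IRQF_NO_THREAD,
};

static void example_old_init(unsigned int parent_irq)
{
	setup_irq(parent_irq, &example_cascade_action);
}

/*
 * New style: request_irq() allocates the irqaction internally and the
 * caller checks (and at least logs) a registration failure.
 */
static void example_new_init(unsigned int parent_irq)
{
	if (request_irq(parent_irq, example_cascade, IRQF_NO_THREAD,
			"example cascade", NULL))
		pr_err("Failed to register example cascade interrupt\n");
}
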
......@@ -302,10 +302,13 @@ static int sa1111_retrigger_irq(struct irq_data *d)
break;
}
if (i == 8)
if (i == 8) {
pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n",
d->irq);
return i == 8 ? -1 : 0;
return 0;
}
return 1;
}
static int sa1111_type_irq(struct irq_data *d, unsigned int flags)
......
......@@ -47,6 +47,8 @@ config MICROBLAZE
select CPU_NO_EFFICIENT_FFS
select MMU_GATHER_NO_RANGE if MMU
select SPARSE_IRQ
select GENERIC_IRQ_MULTI_HANDLER
select HANDLE_DOMAIN_IRQ
# Endianness selection
choice
......
......@@ -14,7 +14,4 @@
struct pt_regs;
extern void do_IRQ(struct pt_regs *regs);
/* should be defined in each interrupt controller driver */
extern unsigned int xintc_get_irq(void);
#endif /* _ASM_MICROBLAZE_IRQ_H */
......@@ -20,29 +20,10 @@
#include <linux/irqchip.h>
#include <linux/of_irq.h>
static u32 concurrent_irq;
void __irq_entry do_IRQ(struct pt_regs *regs)
{
unsigned int irq;
struct pt_regs *old_regs = set_irq_regs(regs);
trace_hardirqs_off();
irq_enter();
irq = xintc_get_irq();
next_irq:
BUG_ON(!irq);
generic_handle_irq(irq);
irq = xintc_get_irq();
if (irq != -1U) {
pr_debug("next irq: %d\n", irq);
++concurrent_irq;
goto next_irq;
}
irq_exit();
set_irq_regs(old_regs);
handle_arch_irq(regs);
trace_hardirqs_on();
}
......
......@@ -157,5 +157,5 @@ void __init trap_init(void)
/* Set the exception vector address */
csr_write(CSR_TVEC, &handle_exception);
/* Enable interrupts */
csr_write(CSR_IE, IE_SIE | IE_EIE);
csr_write(CSR_IE, IE_SIE);
}
......@@ -458,7 +458,7 @@ config IMX_IRQSTEER
Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
config IMX_INTMUX
def_bool y if ARCH_MXC
def_bool y if ARCH_MXC || COMPILE_TEST
select IRQ_DOMAIN
help
Support for the i.MX INTMUX interrupt multiplexer.
......
......@@ -83,7 +83,7 @@ static int aic_retrigger(struct irq_data *d)
irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
irq_gc_unlock(gc);
return 0;
return 1;
}
static int aic_set_type(struct irq_data *d, unsigned type)
......
......@@ -128,7 +128,7 @@ static int aic5_retrigger(struct irq_data *d)
irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
irq_gc_unlock(bgc);
return 0;
return 1;
}
static int aic5_set_type(struct irq_data *d, unsigned type)
......
......@@ -61,6 +61,7 @@
| SHORTCUT1_MASK | SHORTCUT2_MASK)
#define REG_FIQ_CONTROL 0x0c
#define FIQ_CONTROL_ENABLE BIT(7)
#define NR_BANKS 3
#define IRQS_PER_BANK 32
......@@ -135,6 +136,7 @@ static int __init armctrl_of_init(struct device_node *node,
{
void __iomem *base;
int irq, b, i;
u32 reg;
base = of_iomap(node, 0);
if (!base)
......@@ -157,6 +159,19 @@ static int __init armctrl_of_init(struct device_node *node,
handle_level_irq);
irq_set_probe(irq);
}
reg = readl_relaxed(intc.enable[b]);
if (reg) {
writel_relaxed(reg, intc.disable[b]);
pr_err(FW_BUG "Bootloader left irq enabled: "
"bank %d irq %*pbl\n", b, IRQS_PER_BANK, &reg);
}
}
reg = readl_relaxed(base + REG_FIQ_CONTROL);
if (reg & FIQ_CONTROL_ENABLE) {
writel_relaxed(0, base + REG_FIQ_CONTROL);
pr_err(FW_BUG "Bootloader left fiq enabled\n");
}
if (is_2836) {
......
......@@ -50,7 +50,7 @@ struct bcm7038_l1_chip {
struct bcm7038_l1_cpu {
void __iomem *map_base;
u32 mask_cache[0];
u32 mask_cache[];
};
/*
......
......@@ -723,6 +723,7 @@ static void __init gic_dist_init(void)
unsigned int i;
u64 affinity;
void __iomem *base = gic_data.dist_base;
u32 val;
/* Disable the distributor */
writel_relaxed(0, base + GICD_CTLR);
......@@ -755,9 +756,14 @@ static void __init gic_dist_init(void)
/* Now do the common stuff, and wait for the distributor to drain */
gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
pr_info("Enabling SGIs without active state\n");
val |= GICD_CTLR_nASSGIreq;
}
/* Enable distributor with ARE, Group1 */
writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
base + GICD_CTLR);
writel_relaxed(val, base + GICD_CTLR);
/*
* Set all global interrupts to the boot CPU only. ARE must be
......@@ -828,6 +834,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
typer = gic_read_typer(ptr + GICR_TYPER);
if ((typer >> 32) == aff) {
u64 offset = ptr - region->redist_base;
raw_spin_lock_init(&gic_data_rdist()->rd_lock);
gic_data_rdist_rd_base() = ptr;
gic_data_rdist()->phys_base = region->phys_base + offset;
......@@ -1581,7 +1588,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
&gic_data);
irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
gic_data.rdists.has_rvpeid = true;
gic_data.rdists.has_vlpis = true;
......@@ -1592,6 +1598,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
goto out_free;
}
irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
pr_info("Distributor has %sRange Selector support\n",
gic_data.has_rss ? "" : "no ");
......@@ -1757,6 +1765,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
gic_v3_kvm_info.vcpu = r;
gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
gic_set_kvm_info(&gic_v3_kvm_info);
}
......@@ -2072,6 +2081,7 @@ static void __init gic_acpi_setup_kvm_info(void)
}
gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
gic_set_kvm_info(&gic_v3_kvm_info);
}
......
......@@ -85,6 +85,53 @@
static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops;
static const struct irq_domain_ops *sgi_domain_ops;
static bool has_v4_1(void)
{
return !!sgi_domain_ops;
}
static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
{
char *name;
int sgi_base;
if (!has_v4_1())
return 0;
name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
if (!name)
goto err;
vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
if (!vpe->fwnode)
goto err;
kfree(name);
name = NULL;
vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
sgi_domain_ops, vpe);
if (!vpe->sgi_domain)
goto err;
sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
NUMA_NO_NODE, vpe,
false, NULL);
if (sgi_base <= 0)
goto err;
return 0;
err:
if (vpe->sgi_domain)
irq_domain_remove(vpe->sgi_domain);
if (vpe->fwnode)
irq_domain_free_fwnode(vpe->fwnode);
kfree(name);
return -ENOMEM;
}
int its_alloc_vcpu_irqs(struct its_vm *vm)
{
......@@ -112,8 +159,13 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
if (vpe_base_irq <= 0)
goto err;
for (i = 0; i < vm->nr_vpes; i++)
for (i = 0; i < vm->nr_vpes; i++) {
int ret;
vm->vpes[i]->irq = vpe_base_irq + i;
ret = its_alloc_vcpu_sgis(vm->vpes[i], i);
if (ret)
goto err;
}
return 0;
......@@ -126,8 +178,28 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
return -ENOMEM;
}
static void its_free_sgi_irqs(struct its_vm *vm)
{
int i;
if (!has_v4_1())
return;
for (i = 0; i < vm->nr_vpes; i++) {
unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0);
if (WARN_ON(!irq))
continue;
irq_domain_free_irqs(irq, 16);
irq_domain_remove(vm->vpes[i]->sgi_domain);
irq_domain_free_fwnode(vm->vpes[i]->fwnode);
}
}
void its_free_vcpu_irqs(struct its_vm *vm)
{
its_free_sgi_irqs(vm);
irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
irq_domain_remove(vm->domain);
irq_domain_free_fwnode(vm->fwnode);
......@@ -138,18 +210,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
return irq_set_vcpu_affinity(vpe->irq, info);
}
int its_schedule_vpe(struct its_vpe *vpe, bool on)
int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
{
struct irq_desc *desc = irq_to_desc(vpe->irq);
struct its_cmd_info info = { };
int ret;
WARN_ON(preemptible());
info.cmd_type = DESCHEDULE_VPE;
if (has_v4_1()) {
/* GICv4.1 can directly deal with doorbells */
info.req_db = db;
} else {
/* Undo the nested disable_irq() calls... */
while (db && irqd_irq_disabled(&desc->irq_data))
enable_irq(vpe->irq);
}
ret = its_send_vpe_cmd(vpe, &info);
if (!ret)
vpe->resident = false;
return ret;
}
int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
{
struct its_cmd_info info;
struct its_cmd_info info = { };
int ret;
WARN_ON(preemptible());
info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
info.cmd_type = SCHEDULE_VPE;
if (has_v4_1()) {
info.g0en = g0en;
info.g1en = g1en;
} else {
/* Disabled the doorbell, as we're about to enter the guest */
disable_irq_nosync(vpe->irq);
}
ret = its_send_vpe_cmd(vpe, &info);
if (!ret)
vpe->resident = on;
vpe->resident = true;
return ret;
}
......@@ -216,12 +320,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
return irq_set_vcpu_affinity(irq, &info);
}
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
int its_prop_update_vsgi(int irq, u8 priority, bool group)
{
struct its_cmd_info info = {
.cmd_type = PROP_UPDATE_VSGI,
{
.priority = priority,
.group = group,
},
};
return irq_set_vcpu_affinity(irq, &info);
}
int its_init_v4(struct irq_domain *domain,
const struct irq_domain_ops *vpe_ops,
const struct irq_domain_ops *sgi_ops)
{
if (domain) {
pr_info("ITS: Enabling GICv4 support\n");
gic_domain = domain;
vpe_domain_ops = ops;
vpe_domain_ops = vpe_ops;
sgi_domain_ops = sgi_ops;
return 0;
}
......
......@@ -268,15 +268,6 @@ static void init_8259A(int auto_eoi)
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
static struct resource pic1_io_resource = {
.name = "pic1",
.start = PIC_MASTER_CMD,
......@@ -311,6 +302,10 @@ static const struct irq_domain_ops i8259A_ops = {
*/
struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
{
/*
* PIC_CASCADE_IR is cascade interrupt to second interrupt controller
*/
int irq = I8259A_IRQ_BASE + PIC_CASCADE_IR;
struct irq_domain *domain;
insert_resource(&ioport_resource, &pic1_io_resource);
......@@ -323,7 +318,8 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
if (!domain)
panic("Failed to add i8259 IRQ domain");
setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
pr_err("Failed to register cascade interrupt\n");
register_syscore_ops(&i8259_syscore_ops);
return domain;
}
......
......@@ -180,3 +180,4 @@ static int __init ingenic_tcu_irq_init(struct device_node *np,
IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init);
......@@ -58,11 +58,6 @@ static irqreturn_t intc_cascade(int irq, void *data)
return IRQ_HANDLED;
}
static struct irqaction intc_cascade_action = {
.handler = intc_cascade,
.name = "SoC intc cascade interrupt",
};
static int __init ingenic_intc_of_init(struct device_node *node,
unsigned num_chips)
{
......@@ -130,7 +125,9 @@ static int __init ingenic_intc_of_init(struct device_node *node,
irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
}
setup_irq(parent_irq, &intc_cascade_action);
if (request_irq(parent_irq, intc_cascade, 0,
"SoC intc cascade interrupt", NULL))
pr_err("Failed to register SoC intc cascade interrupt\n");
return 0;
out_domain_remove:
......
......@@ -4,6 +4,7 @@
* Copyright (C) 2018 Christoph Hellwig
*/
#define pr_fmt(fmt) "plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
......@@ -55,7 +56,14 @@
#define CONTEXT_THRESHOLD 0x00
#define CONTEXT_CLAIM 0x04
static void __iomem *plic_regs;
#define PLIC_DISABLE_THRESHOLD 0xf
#define PLIC_ENABLE_THRESHOLD 0
struct plic_priv {
struct cpumask lmask;
struct irq_domain *irqdomain;
void __iomem *regs;
};
struct plic_handler {
bool present;
......@@ -66,6 +74,7 @@ struct plic_handler {
*/
raw_spinlock_t enable_lock;
void __iomem *enable_base;
struct plic_priv *priv;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
......@@ -84,31 +93,40 @@ static inline void plic_toggle(struct plic_handler *handler,
}
static inline void plic_irq_toggle(const struct cpumask *mask,
int hwirq, int enable)
struct irq_data *d, int enable)
{
int cpu;
struct plic_priv *priv = irq_get_chip_data(d->irq);
writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
for_each_cpu(cpu, mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present)
plic_toggle(handler, hwirq, enable);
if (handler->present &&
cpumask_test_cpu(cpu, &handler->priv->lmask))
plic_toggle(handler, d->hwirq, enable);
}
}
static void plic_irq_unmask(struct irq_data *d)
{
unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
cpu_online_mask);
struct cpumask amask;
unsigned int cpu;
struct plic_priv *priv = irq_get_chip_data(d->irq);
cpumask_and(&amask, &priv->lmask, cpu_online_mask);
cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
&amask);
if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
return;
plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
plic_irq_toggle(cpumask_of(cpu), d, 1);
}
static void plic_irq_mask(struct irq_data *d)
{
plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
struct plic_priv *priv = irq_get_chip_data(d->irq);
plic_irq_toggle(&priv->lmask, d, 0);
}
#ifdef CONFIG_SMP
......@@ -116,17 +134,21 @@ static int plic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
unsigned int cpu;
struct cpumask amask;
struct plic_priv *priv = irq_get_chip_data(d->irq);
cpumask_and(&amask, &priv->lmask, mask_val);
if (force)
cpu = cpumask_first(mask_val);
cpu = cpumask_first(&amask);
else
cpu = cpumask_any_and(mask_val, cpu_online_mask);
cpu = cpumask_any_and(&amask, cpu_online_mask);
if (cpu >= nr_cpu_ids)
return -EINVAL;
plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
plic_irq_toggle(&priv->lmask, d, 0);
plic_irq_toggle(cpumask_of(cpu), d, 1);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
......@@ -187,8 +209,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
.free = irq_domain_free_irqs_top,
};
static struct irq_domain *plic_irqdomain;
/*
* Handling an interrupt is a two-step process: first you claim the interrupt
* by reading the claim register, then you complete the interrupt by writing
......@@ -205,7 +225,7 @@ static void plic_handle_irq(struct pt_regs *regs)
csr_clear(CSR_IE, IE_EIE);
while ((hwirq = readl(claim))) {
int irq = irq_find_mapping(plic_irqdomain, hwirq);
int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
if (unlikely(irq <= 0))
pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
......@@ -230,20 +250,48 @@ static int plic_find_hart_id(struct device_node *node)
return -1;
}
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
/* priority must be > threshold to trigger an interrupt */
writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}
static int plic_dying_cpu(unsigned int cpu)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
csr_clear(CSR_IE, IE_EIE);
plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
return 0;
}
static int plic_starting_cpu(unsigned int cpu)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
csr_set(CSR_IE, IE_EIE);
plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
return 0;
}
static int __init plic_init(struct device_node *node,
struct device_node *parent)
{
int error = 0, nr_contexts, nr_handlers = 0, i;
u32 nr_irqs;
struct plic_priv *priv;
if (plic_regs) {
pr_warn("PLIC already present.\n");
return -ENXIO;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
plic_regs = of_iomap(node, 0);
if (WARN_ON(!plic_regs))
return -EIO;
priv->regs = of_iomap(node, 0);
if (WARN_ON(!priv->regs)) {
error = -EIO;
goto out_free_priv;
}
error = -EINVAL;
of_property_read_u32(node, "riscv,ndev", &nr_irqs);
......@@ -257,9 +305,9 @@ static int __init plic_init(struct device_node *node,
goto out_iounmap;
error = -ENOMEM;
plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
&plic_irqdomain_ops, NULL);
if (WARN_ON(!plic_irqdomain))
priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
&plic_irqdomain_ops, priv);
if (WARN_ON(!priv->irqdomain))
goto out_iounmap;
for (i = 0; i < nr_contexts; i++) {
......@@ -267,7 +315,6 @@ static int __init plic_init(struct device_node *node,
struct plic_handler *handler;
irq_hw_number_t hwirq;
int cpu, hartid;
u32 threshold = 0;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
......@@ -301,32 +348,36 @@ static int __init plic_init(struct device_node *node,
handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present) {
pr_warn("handler already present for context %d.\n", i);
threshold = 0xffffffff;
plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
goto done;
}
cpumask_set_cpu(cpu, &priv->lmask);
handler->present = true;
handler->hart_base =
plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
raw_spin_lock_init(&handler->enable_lock);
handler->enable_base =
plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
handler->priv = priv;
done:
/* priority must be > threshold to trigger an interrupt */
writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
plic_toggle(handler, hwirq, 0);
nr_handlers++;
}
cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
plic_starting_cpu, plic_dying_cpu);
pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
nr_irqs, nr_handlers, nr_contexts);
set_handle_irq(plic_handle_irq);
return 0;
out_iounmap:
iounmap(plic_regs);
iounmap(priv->regs);
out_free_priv:
kfree(priv);
return error;
}
......
......@@ -604,12 +604,24 @@ static void stm32_exti_h_syscore_deinit(void)
unregister_syscore_ops(&stm32_exti_h_syscore_ops);
}
static int stm32_exti_h_retrigger(struct irq_data *d)
{
struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
void __iomem *base = chip_data->host_data->base;
u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
writel_relaxed(mask, base + stm32_bank->swier_ofst);
return 0;
}
static struct irq_chip stm32_exti_h_chip = {
.name = "stm32-exti-h",
.irq_eoi = stm32_exti_h_eoi,
.irq_mask = stm32_exti_h_mask,
.irq_unmask = stm32_exti_h_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_retrigger = stm32_exti_h_retrigger,
.irq_set_type = stm32_exti_h_set_type,
.irq_set_wake = stm32_exti_h_set_wake,
.flags = IRQCHIP_MASK_ON_SUSPEND,
......
......@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
......@@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d)
static void fpga_irq_handle(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
u32 status = readl(f->base + IRQ_STATUS);
u32 status;
chained_irq_enter(chip, desc);
status = readl(f->base + IRQ_STATUS);
if (status == 0) {
do_bad_IRQ(desc);
return;
goto out;
}
do {
......@@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc)
status &= ~(1 << irq);
generic_handle_irq(irq_find_mapping(f->domain, irq));
} while (status);
out:
chained_irq_exit(chip, desc);
}
/*
......@@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node,
if (of_property_read_u32(node, "valid-mask", &valid_mask))
valid_mask = 0;
writel(clear_mask, base + IRQ_ENABLE_CLEAR);
writel(clear_mask, base + FIQ_ENABLE_CLEAR);
/* Some chips are cascaded from a parent IRQ */
parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
......@@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node,
fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
writel(clear_mask, base + IRQ_ENABLE_CLEAR);
writel(clear_mask, base + FIQ_ENABLE_CLEAR);
/*
* On Versatile AB/PB, some secondary interrupts have a direct
* pass-thru to the primary controller for IRQs 20 and 22-31 which need
......
......@@ -509,9 +509,7 @@ static int __init vic_of_init(struct device_node *node,
void __iomem *regs;
u32 interrupt_mask = ~0;
u32 wakeup_mask = ~0;
if (WARN(parent, "non-root VICs are not supported"))
return -EINVAL;
int parent_irq;
regs = of_iomap(node, 0);
if (WARN_ON(!regs))
......@@ -519,11 +517,14 @@ static int __init vic_of_init(struct device_node *node,
of_property_read_u32(node, "valid-mask", &interrupt_mask);
of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
parent_irq = of_irq_get(node, 0);
if (parent_irq < 0)
parent_irq = 0;
/*
* Passing 0 as first IRQ makes the simple domain allocate descriptors
*/
__vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node);
__vic_init(regs, parent_irq, 0, interrupt_mask, wakeup_mask, node);
return 0;
}
......
......@@ -38,29 +38,31 @@ struct xintc_irq_chip {
void __iomem *base;
struct irq_domain *root_domain;
u32 intr_mask;
u32 nr_irq;
};
static struct xintc_irq_chip *xintc_irqc;
static struct xintc_irq_chip *primary_intc;
static void xintc_write(int reg, u32 data)
static void xintc_write(struct xintc_irq_chip *irqc, int reg, u32 data)
{
if (static_branch_unlikely(&xintc_is_be))
iowrite32be(data, xintc_irqc->base + reg);
iowrite32be(data, irqc->base + reg);
else
iowrite32(data, xintc_irqc->base + reg);
iowrite32(data, irqc->base + reg);
}
static unsigned int xintc_read(int reg)
static u32 xintc_read(struct xintc_irq_chip *irqc, int reg)
{
if (static_branch_unlikely(&xintc_is_be))
return ioread32be(xintc_irqc->base + reg);
return ioread32be(irqc->base + reg);
else
return ioread32(xintc_irqc->base + reg);
return ioread32(irqc->base + reg);
}
static void intc_enable_or_unmask(struct irq_data *d)
{
unsigned long mask = 1 << d->hwirq;
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
unsigned long mask = BIT(d->hwirq);
pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
......@@ -69,30 +71,35 @@ static void intc_enable_or_unmask(struct irq_data *d)
* acks the irq before calling the interrupt handler
*/
if (irqd_is_level_type(d))
xintc_write(IAR, mask);
xintc_write(irqc, IAR, mask);
xintc_write(SIE, mask);
xintc_write(irqc, SIE, mask);
}
static void intc_disable_or_mask(struct irq_data *d)
{
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
xintc_write(CIE, 1 << d->hwirq);
xintc_write(irqc, CIE, BIT(d->hwirq));
}
static void intc_ack(struct irq_data *d)
{
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
xintc_write(IAR, 1 << d->hwirq);
xintc_write(irqc, IAR, BIT(d->hwirq));
}
static void intc_mask_ack(struct irq_data *d)
{
unsigned long mask = 1 << d->hwirq;
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
unsigned long mask = BIT(d->hwirq);
pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
xintc_write(CIE, mask);
xintc_write(IAR, mask);
xintc_write(irqc, CIE, mask);
xintc_write(irqc, IAR, mask);
}
static struct irq_chip intc_dev = {
......@@ -103,13 +110,14 @@ static struct irq_chip intc_dev = {
.irq_mask_ack = intc_mask_ack,
};
unsigned int xintc_get_irq(void)
static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc)
{
unsigned int hwirq, irq = -1;
unsigned int irq = 0;
u32 hwirq;
hwirq = xintc_read(IVR);
hwirq = xintc_read(irqc, IVR);
if (hwirq != -1U)
irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
irq = irq_find_mapping(irqc->root_domain, hwirq);
pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
......@@ -118,7 +126,9 @@ unsigned int xintc_get_irq(void)
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
if (xintc_irqc->intr_mask & (1 << hw)) {
struct xintc_irq_chip *irqc = d->host_data;
if (irqc->intr_mask & BIT(hw)) {
irq_set_chip_and_handler_name(irq, &intc_dev,
handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL);
......@@ -127,6 +137,7 @@ static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL);
}
irq_set_chip_data(irq, irqc);
return 0;
}
......@@ -138,43 +149,55 @@ static const struct irq_domain_ops xintc_irq_domain_ops = {
static void xil_intc_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct xintc_irq_chip *irqc;
u32 pending;
irqc = irq_data_get_irq_handler_data(&desc->irq_data);
chained_irq_enter(chip, desc);
do {
pending = xintc_get_irq();
if (pending == -1U)
pending = xintc_get_irq_local(irqc);
if (pending == 0)
break;
generic_handle_irq(pending);
} while (true);
chained_irq_exit(chip, desc);
}
static void xil_intc_handle_irq(struct pt_regs *regs)
{
u32 hwirq;
struct xintc_irq_chip *irqc = primary_intc;
do {
hwirq = xintc_read(irqc, IVR);
if (likely(hwirq != -1U)) {
int ret;
ret = handle_domain_irq(irqc->root_domain, hwirq, regs);
WARN_ONCE(ret, "Unhandled HWIRQ %d\n", hwirq);
continue;
}
break;
} while (1);
}
static int __init xilinx_intc_of_init(struct device_node *intc,
struct device_node *parent)
{
u32 nr_irq;
int ret, irq;
struct xintc_irq_chip *irqc;
if (xintc_irqc) {
pr_err("irq-xilinx: Multiple instances aren't supported\n");
return -EINVAL;
}
int ret, irq;
irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
if (!irqc)
return -ENOMEM;
xintc_irqc = irqc;
irqc->base = of_iomap(intc, 0);
BUG_ON(!irqc->base);
ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq);
if (ret < 0) {
pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
goto err_alloc;
goto error;
}
ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
......@@ -183,34 +206,35 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
irqc->intr_mask = 0;
}
if (irqc->intr_mask >> nr_irq)
if (irqc->intr_mask >> irqc->nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
intc, nr_irq, irqc->intr_mask);
intc, irqc->nr_irq, irqc->intr_mask);
/*
* Disable all external interrupts until they are
* explicity requested.
*/
xintc_write(IER, 0);
xintc_write(irqc, IER, 0);
/* Acknowledge any pending interrupts just in case. */
xintc_write(IAR, 0xffffffff);
xintc_write(irqc, IAR, 0xffffffff);
/* Turn on the Master Enable. */
xintc_write(MER, MER_HIE | MER_ME);
if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
xintc_write(irqc, MER, MER_HIE | MER_ME);
if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) {
static_branch_enable(&xintc_is_be);
xintc_write(MER, MER_HIE | MER_ME);
xintc_write(irqc, MER, MER_HIE | MER_ME);
}
irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
&xintc_irq_domain_ops, irqc);
if (!irqc->root_domain) {
pr_err("irq-xilinx: Unable to create IRQ domain\n");
goto err_alloc;
ret = -EINVAL;
goto error;
}
if (parent) {
......@@ -222,16 +246,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
} else {
pr_err("irq-xilinx: interrupts property not in DT\n");
ret = -EINVAL;
goto err_alloc;
goto error;
}
} else {
irq_set_default_host(irqc->root_domain);
primary_intc = irqc;
set_handle_irq(xil_intc_handle_irq);
}
return 0;
err_alloc:
xintc_irqc = NULL;
error:
iounmap(irqc->base);
kfree(irqc);
return ret;
......
......@@ -33,7 +33,7 @@ struct combiner {
int parent_irq;
u32 nirqs;
u32 nregs;
struct combiner_reg regs[0];
struct combiner_reg regs[];
};
static inline int irq_nr(u32 reg, u32 bit)
......
......@@ -92,6 +92,7 @@ struct stm32_gpio_bank {
u32 bank_nr;
u32 bank_ioport_nr;
u32 pin_backup[STM32_GPIO_PINS_PER_BANK];
u8 irq_type[STM32_GPIO_PINS_PER_BANK];
};
struct stm32_pinctrl {
......@@ -303,6 +304,50 @@ static const struct gpio_chip stm32_gpio_template = {
.get_direction = stm32_gpio_get_direction,
};
static void stm32_gpio_irq_trigger(struct irq_data *d)
{
struct stm32_gpio_bank *bank = d->domain->host_data;
int level;
/* If level interrupt type then retrig */
level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
(level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
irq_chip_retrigger_hierarchy(d);
}
static void stm32_gpio_irq_eoi(struct irq_data *d)
{
irq_chip_eoi_parent(d);
stm32_gpio_irq_trigger(d);
};
static int stm32_gpio_set_type(struct irq_data *d, unsigned int type)
{
struct stm32_gpio_bank *bank = d->domain->host_data;
u32 parent_type;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
parent_type = type;
break;
case IRQ_TYPE_LEVEL_HIGH:
parent_type = IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_LEVEL_LOW:
parent_type = IRQ_TYPE_EDGE_FALLING;
break;
default:
return -EINVAL;
}
bank->irq_type[d->hwirq] = type;
return irq_chip_set_type_parent(d, parent_type);
};
static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
{
struct stm32_gpio_bank *bank = irq_data->domain->host_data;
......@@ -330,13 +375,19 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
}
static void stm32_gpio_irq_unmask(struct irq_data *d)
{
irq_chip_unmask_parent(d);
stm32_gpio_irq_trigger(d);
}
static struct irq_chip stm32_gpio_irq_chip = {
.name = "stm32gpio",
.irq_eoi = irq_chip_eoi_parent,
.irq_eoi = stm32_gpio_irq_eoi,
.irq_ack = irq_chip_ack_parent,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_set_type = irq_chip_set_type_parent,
.irq_unmask = stm32_gpio_irq_unmask,
.irq_set_type = stm32_gpio_set_type,
.irq_set_wake = irq_chip_set_wake_parent,
.irq_request_resources = stm32_gpio_irq_request_resources,
.irq_release_resources = stm32_gpio_irq_release_resources,
......
......@@ -70,6 +70,7 @@ struct vgic_global {
/* Hardware has GICv4? */
bool has_gicv4;
bool has_gicv4_1;
/* GIC system register CPU interface */
struct static_key_false gicv3_cpuif;
......
......@@ -102,6 +102,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
......
......@@ -32,6 +32,8 @@ struct gic_kvm_info {
struct resource vctrl;
/* vlpi support */
bool has_v4;
/* rvpeid support */
bool has_v4_1;
};
const struct gic_kvm_info *gic_get_kvm_info(void);
......
......@@ -57,6 +57,7 @@
#define GICD_SPENDSGIR 0x0F20
#define GICD_CTLR_RWP (1U << 31)
#define GICD_CTLR_nASSGIreq (1U << 8)
#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
......@@ -90,6 +91,7 @@
#define GICD_TYPER_ESPIS(typer) \
(((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
#define GICD_TYPER2_nASSGIcap (1U << 8)
#define GICD_TYPER2_VIL (1U << 7)
#define GICD_TYPER2_VID GENMASK(4, 0)
......@@ -320,6 +322,9 @@
#define GICR_VPENDBASER_NonShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
#define GICR_VPENDBASER_InnerShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable)
#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
......@@ -343,6 +348,15 @@
#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58)
#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0)
#define GICR_VSGIR 0x0080
#define GICR_VSGIR_VPEID GENMASK(15, 0)
#define GICR_VSGIPENDR 0x0088
#define GICR_VSGIPENDR_BUSY (1U << 31)
#define GICR_VSGIPENDR_PENDING GENMASK(15, 0)
/*
* ITS registers, offsets from ITS_base
*/
......@@ -366,6 +380,11 @@
#define GITS_TRANSLATER 0x10040
#define GITS_SGIR 0x20020
#define GITS_SGIR_VPEID GENMASK_ULL(47, 32)
#define GITS_SGIR_VINTID GENMASK_ULL(3, 0)
#define GITS_CTLR_ENABLE (1U << 0)
#define GITS_CTLR_ImDe (1U << 1)
#define GITS_CTLR_ITS_NUMBER_SHIFT 4
......@@ -500,8 +519,9 @@
#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
/* VMOVP and INVDB are the odd ones, as they dont have a physical counterpart */
/* VMOVP, VSGI and INVDB are the odd ones, as they dont have a physical counterpart */
#define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
#define GITS_CMD_VSGI GITS_CMD_GICv4(3)
#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe)
/*
......@@ -650,6 +670,7 @@
struct rdists {
struct {
raw_spinlock_t rd_lock;
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
......
......@@ -49,10 +49,22 @@ struct its_vpe {
};
/* GICv4.1 implementations */
struct {
struct fwnode_handle *fwnode;
struct irq_domain *sgi_domain;
struct {
u8 priority;
bool enabled;
bool group;
} sgi_config[16];
atomic_t vmapp_count;
};
};
/*
* Ensures mutual exclusion between affinity setting of the
* vPE and vLPI operations using vpe->col_idx.
*/
raw_spinlock_t vpe_lock;
/*
* This collection ID is used to indirect the target
* redistributor for this VPE. The ID itself isn't involved in
......@@ -93,6 +105,7 @@ enum its_vcpu_info_cmd_type {
SCHEDULE_VPE,
DESCHEDULE_VPE,
INVALL_VPE,
PROP_UPDATE_VSGI,
};
struct its_cmd_info {
......@@ -105,19 +118,27 @@ struct its_cmd_info {
bool g0en;
bool g1en;
};
struct {
u8 priority;
bool group;
};
};
};
int its_alloc_vcpu_irqs(struct its_vm *vm);
void its_free_vcpu_irqs(struct its_vm *vm);
int its_schedule_vpe(struct its_vpe *vpe, bool on);
int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
int its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
int its_prop_update_vsgi(int irq, u8 priority, bool group);
struct irq_domain_ops;
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
int its_init_v4(struct irq_domain *domain,
const struct irq_domain_ops *vpe_ops,
const struct irq_domain_ops *sgi_ops);
#endif
......@@ -595,7 +595,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
/* GICv4 support? */
if (info->has_v4) {
kvm_vgic_global_state.has_gicv4 = gicv4_enable;
kvm_info("GICv4 support %sabled\n",
kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
kvm_info("GICv4%s support %sabled\n",
kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
gicv4_enable ? "en" : "dis");
}
......
......@@ -67,10 +67,10 @@
* it. And if we've migrated our vcpu from one CPU to another, we must
* tell the ITS (so that the messages reach the right redistributor).
* This is done in two steps: first issue a irq_set_affinity() on the
* irq corresponding to the vcpu, then call its_schedule_vpe(). You
* must be in a non-preemptible context. On exit, another call to
* its_schedule_vpe() tells the redistributor that we're done with the
* vcpu.
* irq corresponding to the vcpu, then call its_make_vpe_resident().
* You must be in a non-preemptible context. On exit, a call to
* its_make_vpe_non_resident() tells the redistributor that we're done
* with the vcpu.
*
* Finally, the doorbell handling: Each vcpu is allocated an interrupt
* which will fire each time a VLPI is made pending whilst the vcpu is
......@@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
struct kvm_vcpu *vcpu = info;
/* We got the message, no need to fire again */
if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
if (!kvm_vgic_global_state.has_gicv4_1 &&
!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
disable_irq_nosync(irq);
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
......@@ -199,19 +200,11 @@ void vgic_v4_teardown(struct kvm *kvm)
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
struct irq_desc *desc = irq_to_desc(vpe->irq);
if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
return 0;
/*
* If blocking, a doorbell is required. Undo the nested
* disable_irq() calls...
*/
while (need_db && irqd_irq_disabled(&desc->irq_data))
enable_irq(vpe->irq);
return its_schedule_vpe(vpe, false);
return its_make_vpe_non_resident(vpe, need_db);
}
int vgic_v4_load(struct kvm_vcpu *vcpu)
......@@ -232,18 +225,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
if (err)
return err;
/* Disabled the doorbell, as we're about to enter the guest */
disable_irq_nosync(vpe->irq);
err = its_schedule_vpe(vpe, true);
err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
if (err)
return err;
/*
* Now that the VPE is resident, let's get rid of a potential
* doorbell interrupt that would still be pending.
* doorbell interrupt that would still be pending. This is a
* GICv4.0 only "feature"...
*/
return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
if (!kvm_vgic_global_state.has_gicv4_1)
err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
return err;
}
static struct vgic_its *vgic_get_its(struct kvm *kvm,
......