Commit 8a13b02a authored by Thomas Gleixner

Merge tag 'irqchip-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core

Pull irqchip updates from Marc Zyngier:

 - Second batch of the GICv4.1 support saga
 - Level triggered interrupt support for the stm32 controller
 - Versatile-fpga chained interrupt fixes
 - DT support for cascaded VIC interrupt controller
 - RPi irqchip initialization fixes
 - Multi-instance support for the Xilinx interrupt controller
 - Multi-instance support for the PLIC interrupt controller
 - CPU hotplug support for the PLIC interrupt controller
 - Ingenic X1000 TCU support
 - Small fixes all over the shop (GICv3, GICv4, Xilinx, Atmel, sa1111)
 - Cleanups (setup_irq removal, zero-length array removal)
Parents: ba947241 771df8cf
@@ -302,10 +302,13 @@ static int sa1111_retrigger_irq(struct irq_data *d)
			break;
	}

-	if (i == 8)
	if (i == 8) {
		pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n",
		       d->irq);
-	return i == 8 ? -1 : 0;
		return 0;
	}

	return 1;
}

static int sa1111_type_irq(struct irq_data *d, unsigned int flags)
......
@@ -47,6 +47,8 @@ config MICROBLAZE
	select CPU_NO_EFFICIENT_FFS
	select MMU_GATHER_NO_RANGE if MMU
	select SPARSE_IRQ
	select GENERIC_IRQ_MULTI_HANDLER
	select HANDLE_DOMAIN_IRQ

# Endianness selection
choice
......
@@ -14,7 +14,4 @@
struct pt_regs;
extern void do_IRQ(struct pt_regs *regs);

-/* should be defined in each interrupt controller driver */
-extern unsigned int xintc_get_irq(void);

#endif /* _ASM_MICROBLAZE_IRQ_H */
@@ -20,29 +20,10 @@
#include <linux/irqchip.h>
#include <linux/of_irq.h>

-static u32 concurrent_irq;

void __irq_entry do_IRQ(struct pt_regs *regs)
{
-	unsigned int irq;
-	struct pt_regs *old_regs = set_irq_regs(regs);
	trace_hardirqs_off();
	handle_arch_irq(regs);
-	irq_enter();
-	irq = xintc_get_irq();
-next_irq:
-	BUG_ON(!irq);
-	generic_handle_irq(irq);
-	irq = xintc_get_irq();
-	if (irq != -1U) {
-		pr_debug("next irq: %d\n", irq);
-		++concurrent_irq;
-		goto next_irq;
-	}
-	irq_exit();
-	set_irq_regs(old_regs);
	trace_hardirqs_on();
}
......
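With the hunk above, MicroBlaze's do_IRQ() simply bounces into handle_arch_irq, so the interrupt controller driver (the Xilinx intc, in this series) is expected to register itself as the root handler. Here is a minimal sketch of that pattern, not taken from the Xilinx driver: the foo_* names and FOO_PENDING are made up, while set_handle_irq() and handle_domain_irq() are the generic helpers enabled by GENERIC_IRQ_MULTI_HANDLER and HANDLE_DOMAIN_IRQ.

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#define FOO_PENDING	0x00	/* made-up register offset */

static struct irq_domain *foo_domain;	/* hypothetical driver state */
static void __iomem *foo_base;

static void foo_handle_irq(struct pt_regs *regs)
{
	u32 hwirq;

	/* Dispatch every pending hwirq through the irq domain. */
	while ((hwirq = readl_relaxed(foo_base + FOO_PENDING)) != ~0U)
		handle_domain_irq(foo_domain, hwirq, regs);
}

static void __init foo_register_root_handler(void)
{
	/* This is what handle_arch_irq (used by do_IRQ above) ends up calling. */
	set_handle_irq(foo_handle_irq);
}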
@@ -157,5 +157,5 @@ void __init trap_init(void)
	/* Set the exception vector address */
	csr_write(CSR_TVEC, &handle_exception);
	/* Enable interrupts */
-	csr_write(CSR_IE, IE_SIE | IE_EIE);
	csr_write(CSR_IE, IE_SIE);
}
@@ -458,7 +458,7 @@ config IMX_IRQSTEER
	  Support for the i.MX IRQSTEER interrupt multiplexer/remapper.

config IMX_INTMUX
-	def_bool y if ARCH_MXC
	def_bool y if ARCH_MXC || COMPILE_TEST
	select IRQ_DOMAIN
	help
	  Support for the i.MX INTMUX interrupt multiplexer.
......
@@ -83,7 +83,7 @@ static int aic_retrigger(struct irq_data *d)
	irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
	irq_gc_unlock(gc);

-	return 0;
	return 1;
}

static int aic_set_type(struct irq_data *d, unsigned type)
......
@@ -128,7 +128,7 @@ static int aic5_retrigger(struct irq_data *d)
	irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
	irq_gc_unlock(bgc);

-	return 0;
	return 1;
}

static int aic5_set_type(struct irq_data *d, unsigned type)
......
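The three retrigger fixes above (sa1111, aic, aic5) all flip a hard-coded return 0 to return 1. The convention they are being aligned with is that the genirq core treats a non-zero return from irq_retrigger as "the interrupt was resent in hardware" and only falls back to a software resend when it sees zero. A minimal sketch of a callback following that convention, not from this series; FOO_SETPEND and the foo_* names are made up.

/*
 * Sketch only: retrigger by poking a hypothetical "set pending" register.
 * Returning 1 tells the core no software resend is needed.
 */
static int foo_retrigger(struct irq_data *d)
{
	struct foo_chip_data *chip = irq_data_get_irq_chip_data(d);

	writel_relaxed(BIT(d->hwirq), chip->base + FOO_SETPEND);
	return 1;
}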
@@ -61,6 +61,7 @@
			 | SHORTCUT1_MASK | SHORTCUT2_MASK)

#define REG_FIQ_CONTROL		0x0c
#define FIQ_CONTROL_ENABLE	BIT(7)

#define NR_BANKS		3
#define IRQS_PER_BANK		32

@@ -135,6 +136,7 @@ static int __init armctrl_of_init(struct device_node *node,
{
	void __iomem *base;
	int irq, b, i;
	u32 reg;

	base = of_iomap(node, 0);
	if (!base)

@@ -157,6 +159,19 @@ static int __init armctrl_of_init(struct device_node *node,
						 handle_level_irq);
			irq_set_probe(irq);
		}

		reg = readl_relaxed(intc.enable[b]);
		if (reg) {
			writel_relaxed(reg, intc.disable[b]);
			pr_err(FW_BUG "Bootloader left irq enabled: "
			       "bank %d irq %*pbl\n", b, IRQS_PER_BANK, &reg);
		}
	}

	reg = readl_relaxed(base + REG_FIQ_CONTROL);
	if (reg & FIQ_CONTROL_ENABLE) {
		writel_relaxed(0, base + REG_FIQ_CONTROL);
		pr_err(FW_BUG "Bootloader left fiq enabled\n");
	}

	if (is_2836) {
......
@@ -50,7 +50,7 @@ struct bcm7038_l1_chip {
struct bcm7038_l1_cpu {
	void __iomem	*map_base;
-	u32		mask_cache[0];
	u32		mask_cache[];
};

/*
......
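The bcm7038 hunk is one of the zero-length-array removals mentioned in the merge description: the [0] member becomes a C99 flexible array member, so the compiler and fortification checks know the array has no fixed size. A minimal sketch of the usual allocation idiom, using a made-up struct; struct_size() is the overflow-checking helper from <linux/overflow.h>.

#include <linux/overflow.h>
#include <linux/slab.h>

struct foo_cpu {
	void __iomem *map_base;
	u32 mask_cache[];	/* flexible array member */
};

static struct foo_cpu *foo_cpu_alloc(unsigned int n_words)
{
	struct foo_cpu *cpu;

	/*
	 * struct_size(cpu, mask_cache, n_words) computes
	 * sizeof(*cpu) + n_words * sizeof(cpu->mask_cache[0]) safely.
	 */
	cpu = kzalloc(struct_size(cpu, mask_cache, n_words), GFP_KERNEL);
	return cpu;
}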
@@ -96,6 +96,7 @@ struct its_node {
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	void __iomem		*sgir_base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;

@@ -188,6 +189,15 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
/*
* Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
* always have vSGIs mapped.
*/
static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
{
return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
}
static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;

@@ -197,7 +207,7 @@ static u16 get_its_list(struct its_vm *vm)
		if (!is_v4(its))
			continue;

-		if (vm->vlpi_count[its->list_nr])
		if (require_its_list_vmovp(vm, its))
			__set_bit(its->list_nr, &its_list);
	}

@@ -239,15 +249,41 @@ static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
	return NULL;
}
-static int irq_to_cpuid(struct irq_data *d)
static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
{
	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
	return vpe->col_idx;
}

static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}

static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
{
-	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_vlpi_map *map = get_vlpi_map(d);
	int cpu;

-	if (map)
	if (map) {
-		return map->vpe->col_idx;
		cpu = vpe_to_cpuid_lock(map->vpe, flags);
	} else {
		/* Physical LPIs are already locked via the irq_desc lock */
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);

		cpu = its_dev->event_map.col_map[its_get_event_id(d)];
		/* Keep GCC quiet... */
		*flags = 0;
	}

	return cpu;
}

static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
{
	struct its_vlpi_map *map = get_vlpi_map(d);

-	return its_dev->event_map.col_map[its_get_event_id(d)];
	if (map)
		vpe_to_cpuid_unlock(map->vpe, flags);
}
static struct its_collection *valid_col(struct its_collection *col)

@@ -353,6 +389,15 @@ struct its_cmd_desc {
		struct {
			struct its_vpe *vpe;
		} its_invdb_cmd;
struct {
struct its_vpe *vpe;
u8 sgi;
u8 priority;
bool enable;
bool group;
bool clear;
} its_vsgi_cmd;
}; };
	};
@@ -501,6 +546,31 @@ static void its_encode_db(struct its_cmd_block *cmd, bool db)
	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
}
static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
{
its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
}
static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
{
its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
}
static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
{
its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
}
static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
{
its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
}
static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
{
its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
}
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */

@@ -866,6 +936,26 @@ static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
	return valid_vpe(its, desc->its_invdb_cmd.vpe);
}
static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
if (WARN_ON(!is_v4_1(its)))
return NULL;
its_encode_cmd(cmd, GITS_CMD_VSGI);
its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
its_fixup_cmd(cmd);
return valid_vpe(its, desc->its_vsgi_cmd.vpe);
}
static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
@@ -1214,7 +1304,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
		if (!is_v4(its))
			continue;

-		if (!vpe->its_vm->vlpi_count[its->list_nr])
		if (!require_its_list_vmovp(vpe->its_vm, its))
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
@@ -1321,7 +1411,7 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
static void wait_for_syncr(void __iomem *rdbase)
{
-	while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
	while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
		cpu_relax();
}
@@ -1329,7 +1419,9 @@ static void direct_lpi_inv(struct irq_data *d)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	void __iomem *rdbase;
	unsigned long flags;
	u64 val;
	int cpu;

	if (map) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);

@@ -1344,10 +1436,14 @@ static void direct_lpi_inv(struct irq_data *d)
	}

	/* Target the redistributor this LPI is currently routed to */
-	rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base;
	cpu = irq_to_cpuid_lock(d, &flags);
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVLPIR);
	wait_for_syncr(rdbase);
	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
	irq_to_cpuid_unlock(d, flags);
}
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)

@@ -1499,12 +1595,31 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
	return 0;
}
/*
* Two favourable cases:
*
* (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
* for vSGI delivery
*
* (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
* and we're better off mapping all VPEs always
*
* If neither (a) nor (b) is true, then we map vPEs on demand.
*
*/
static bool gic_requires_eager_mapping(void)
{
if (!its_list_map || gic_rdists->has_rvpeid)
return true;
return false;
}
static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

-	/* Not using the ITS list? Everything is always mapped. */
-	if (!its_list_map)
	if (gic_requires_eager_mapping())
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

@@ -1538,7 +1653,7 @@ static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
-	if (!its_list_map)
	if (gic_requires_eager_mapping())
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);
@@ -2036,18 +2151,17 @@ static void its_write_baser(struct its_node *its, struct its_baser *baser,
}

static int its_setup_baser(struct its_node *its, struct its_baser *baser,
-			   u64 cache, u64 shr, u32 psz, u32 order,
-			   bool indirect)
			   u64 cache, u64 shr, u32 order, bool indirect)
{
	u64 val = its_read_baser(its, baser);
	u64 esz = GITS_BASER_ENTRY_SIZE(val);
	u64 type = GITS_BASER_TYPE(val);
	u64 baser_phys, tmp;
-	u32 alloc_pages;
	u32 alloc_pages, psz;
	struct page *page;
	void *base;

-retry_alloc_baser:
	psz = baser->psz;
	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
	if (alloc_pages > GITS_BASER_PAGES_MAX) {
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",

@@ -2120,25 +2234,6 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
		goto retry_baser;
	}

-	if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
-		/*
-		 * Page size didn't stick. Let's try a smaller
-		 * size and retry. If we reach 4K, then
-		 * something is horribly wrong...
-		 */
-		free_pages((unsigned long)base, order);
-		baser->base = NULL;
-		switch (psz) {
-		case SZ_16K:
-			psz = SZ_4K;
-			goto retry_alloc_baser;
-		case SZ_64K:
-			psz = SZ_16K;
-			goto retry_alloc_baser;
-		}
-	}
	if (val != tmp) {
		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
		       &its->phys_base, its_base_type_string[type],

@@ -2164,13 +2259,14 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
static bool its_parse_indirect_baser(struct its_node *its,
				     struct its_baser *baser,
-				     u32 psz, u32 *order, u32 ids)
				     u32 *order, u32 ids)
{
	u64 tmp = its_read_baser(its, baser);
	u64 type = GITS_BASER_TYPE(tmp);
	u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
	u32 new_order = *order;
	u32 psz = baser->psz;
	bool indirect = false;

	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
@@ -2288,11 +2384,58 @@ static void its_free_tables(struct its_node *its)
	}
}
static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
{
u64 psz = SZ_64K;
while (psz) {
u64 val, gpsz;
val = its_read_baser(its, baser);
val &= ~GITS_BASER_PAGE_SIZE_MASK;
switch (psz) {
case SZ_64K:
gpsz = GITS_BASER_PAGE_SIZE_64K;
break;
case SZ_16K:
gpsz = GITS_BASER_PAGE_SIZE_16K;
break;
case SZ_4K:
default:
gpsz = GITS_BASER_PAGE_SIZE_4K;
break;
}
gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
its_write_baser(its, baser, val);
if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
break;
switch (psz) {
case SZ_64K:
psz = SZ_16K;
break;
case SZ_16K:
psz = SZ_4K;
break;
case SZ_4K:
default:
return -1;
}
}
baser->psz = psz;
return 0;
}
static int its_alloc_tables(struct its_node *its)
{
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache = GITS_BASER_RaWaWb;
-	u32 psz = SZ_64K;
	int err, i;

	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)

@@ -2303,16 +2446,22 @@ static int its_alloc_tables(struct its_node *its)
		struct its_baser *baser = its->tables + i;
		u64 val = its_read_baser(its, baser);
		u64 type = GITS_BASER_TYPE(val);
-		u32 order = get_order(psz);
		bool indirect = false;
		u32 order;

-		switch (type) {
-		case GITS_BASER_TYPE_NONE:
		if (type == GITS_BASER_TYPE_NONE)
			continue;

		if (its_probe_baser_psz(its, baser)) {
			its_free_tables(its);
			return -ENXIO;
		}

		order = get_order(baser->psz);

		switch (type) {
		case GITS_BASER_TYPE_DEVICE:
-			indirect = its_parse_indirect_baser(its, baser,
-							    psz, &order,
			indirect = its_parse_indirect_baser(its, baser, &order,
							    device_ids(its));
			break;

@@ -2328,20 +2477,18 @@
			}
		}

-			indirect = its_parse_indirect_baser(its, baser,
-							    psz, &order,
			indirect = its_parse_indirect_baser(its, baser, &order,
							    ITS_MAX_VPEID_BITS);
			break;
		}

-		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
		err = its_setup_baser(its, baser, cache, shr, order, indirect);
		if (err < 0) {
			its_free_tables(its);
			return err;
		}

		/* Update settings which will be used for next BASERn */
-		psz = baser->psz;
		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
	}
@@ -2452,6 +2599,10 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
	if (!gic_rdists->has_rvpeid)
		return true;

	/* Skip non-present CPUs */
	if (!base)
		return true;

	val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
	esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;

@@ -3482,17 +3633,25 @@ static int its_vpe_set_affinity(struct irq_data *d,
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	int from, cpu = cpumask_first(mask_val);
	unsigned long flags;

	/*
	 * Changing affinity is mega expensive, so let's be as lazy as
	 * we can and only do it if we really have to. Also, if mapped
	 * into the proxy device, we need to move the doorbell
	 * interrupt to its new location.
	 *
	 * Another thing is that changing the affinity of a vPE affects
	 * *other interrupts* such as all the vLPIs that are routed to
	 * this vPE. This means that the irq_desc lock is not enough to
	 * protect us, and that we must ensure nobody samples vpe->col_idx
	 * during the update, hence the lock below which must also be
	 * taken on any vLPI handling path that evaluates vpe->col_idx.
	 */
-	if (vpe->col_idx == cpu)
	from = vpe_to_cpuid_lock(vpe, &flags);
	if (from == cpu)
		goto out;

-	from = vpe->col_idx;
	vpe->col_idx = cpu;

	/*

@@ -3508,6 +3667,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
 out:
	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	vpe_to_cpuid_unlock(vpe, flags);
	return IRQ_SET_MASK_OK_DONE;
}
@@ -3528,7 +3688,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
	val = virt_to_phys(page_address(vpe->vpt_page)) &
	      GENMASK_ULL(51, 16);
	val |= GICR_VPENDBASER_RaWaWb;
-	val |= GICR_VPENDBASER_NonShareable;
	val |= GICR_VPENDBASER_InnerShareable;

	/*
	 * There is no good way of finding out if the pending table is
	 * empty as we can race against the doorbell interrupt very
@@ -3619,9 +3779,11 @@ static void its_vpe_send_inv(struct irq_data *d)
		void __iomem *rdbase;

		/* Target the redistributor this VPE is currently known on */
		raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
		gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
		wait_for_syncr(rdbase);
		raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
	} else {
		its_vpe_send_cmd(vpe, its_send_inv);
	}
@@ -3675,12 +3837,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d,
	return 0;
}
static int its_vpe_retrigger(struct irq_data *d)
{
return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
}
static struct irq_chip its_vpe_irq_chip = {
	.name			= "GICv4-vpe",
	.irq_mask		= its_vpe_mask_irq,
	.irq_unmask		= its_vpe_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_vpe_set_affinity,
	.irq_retrigger		= its_vpe_retrigger,
	.irq_set_irqchip_state	= its_vpe_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_vpe_set_vcpu_affinity,
};
@@ -3782,8 +3950,12 @@ static void its_vpe_4_1_invall(struct its_vpe *vpe)
	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);

	/* Target the redistributor this vPE is currently known on */
	raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
	rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVALLR);
	wait_for_syncr(rdbase);
	raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
}
static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)

@@ -3818,6 +3990,221 @@ static struct irq_chip its_vpe_4_1_irq_chip = {
	.irq_set_vcpu_affinity	= its_vpe_4_1_set_vcpu_affinity,
};
static void its_configure_sgi(struct irq_data *d, bool clear)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_cmd_desc desc;
desc.its_vsgi_cmd.vpe = vpe;
desc.its_vsgi_cmd.sgi = d->hwirq;
desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
desc.its_vsgi_cmd.clear = clear;
/*
* GICv4.1 allows us to send VSGI commands to any ITS as long as the
* destination VPE is mapped there. Since we map them eagerly at
* activation time, we're pretty sure the first GICv4.1 ITS will do.
*/
its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
}
static void its_sgi_mask_irq(struct irq_data *d)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
vpe->sgi_config[d->hwirq].enabled = false;
its_configure_sgi(d, false);
}
static void its_sgi_unmask_irq(struct irq_data *d)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
vpe->sgi_config[d->hwirq].enabled = true;
its_configure_sgi(d, false);
}
static int its_sgi_set_affinity(struct irq_data *d,
const struct cpumask *mask_val,
bool force)
{
/*
* There is no notion of affinity for virtual SGIs, at least
* not on the host (since they can only be targetting a vPE).
* Tell the kernel we've done whatever it asked for.
*/
return IRQ_SET_MASK_OK;
}
static int its_sgi_set_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which,
bool state)
{
if (which != IRQCHIP_STATE_PENDING)
return -EINVAL;
if (state) {
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_node *its = find_4_1_its();
u64 val;
val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
} else {
its_configure_sgi(d, true);
}
return 0;
}
static int its_sgi_get_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which, bool *val)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
void __iomem *base;
unsigned long flags;
u32 count = 1000000; /* 1s! */
u32 status;
int cpu;
if (which != IRQCHIP_STATE_PENDING)
return -EINVAL;
/*
* Locking galore! We can race against two different events:
*
* - Concurent vPE affinity change: we must make sure it cannot
* happen, or we'll talk to the wrong redistributor. This is
* identical to what happens with vLPIs.
*
* - Concurrent VSGIPENDR access: As it involves accessing two
* MMIO registers, this must be made atomic one way or another.
*/
cpu = vpe_to_cpuid_lock(vpe, &flags);
raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
do {
status = readl_relaxed(base + GICR_VSGIPENDR);
if (!(status & GICR_VSGIPENDR_BUSY))
goto out;
count--;
if (!count) {
pr_err_ratelimited("Unable to get SGI status\n");
goto out;
}
cpu_relax();
udelay(1);
} while (count);
out:
raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
vpe_to_cpuid_unlock(vpe, flags);
if (!count)
return -ENXIO;
*val = !!(status & (1 << d->hwirq));
return 0;
}
static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_cmd_info *info = vcpu_info;
switch (info->cmd_type) {
case PROP_UPDATE_VSGI:
vpe->sgi_config[d->hwirq].priority = info->priority;
vpe->sgi_config[d->hwirq].group = info->group;
its_configure_sgi(d, false);
return 0;
default:
return -EINVAL;
}
}
static struct irq_chip its_sgi_irq_chip = {
.name = "GICv4.1-sgi",
.irq_mask = its_sgi_mask_irq,
.irq_unmask = its_sgi_unmask_irq,
.irq_set_affinity = its_sgi_set_affinity,
.irq_set_irqchip_state = its_sgi_set_irqchip_state,
.irq_get_irqchip_state = its_sgi_get_irqchip_state,
.irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity,
};
static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *args)
{
struct its_vpe *vpe = args;
int i;
/* Yes, we do want 16 SGIs */
WARN_ON(nr_irqs != 16);
for (i = 0; i < 16; i++) {
vpe->sgi_config[i].priority = 0;
vpe->sgi_config[i].enabled = false;
vpe->sgi_config[i].group = false;
irq_domain_set_hwirq_and_chip(domain, virq + i, i,
&its_sgi_irq_chip, vpe);
irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
}
return 0;
}
static void its_sgi_irq_domain_free(struct irq_domain *domain,
unsigned int virq,
unsigned int nr_irqs)
{
/* Nothing to do */
}
static int its_sgi_irq_domain_activate(struct irq_domain *domain,
struct irq_data *d, bool reserve)
{
/* Write out the initial SGI configuration */
its_configure_sgi(d, false);
return 0;
}
static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
struct irq_data *d)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
/*
* The VSGI command is awkward:
*
* - To change the configuration, CLEAR must be set to false,
* leaving the pending bit unchanged.
* - To clear the pending bit, CLEAR must be set to true, leaving
* the configuration unchanged.
*
* You just can't do both at once, hence the two commands below.
*/
vpe->sgi_config[d->hwirq].enabled = false;
its_configure_sgi(d, false);
its_configure_sgi(d, true);
}
static const struct irq_domain_ops its_sgi_domain_ops = {
.alloc = its_sgi_irq_domain_alloc,
.free = its_sgi_irq_domain_free,
.activate = its_sgi_irq_domain_activate,
.deactivate = its_sgi_irq_domain_deactivate,
};
static int its_vpe_id_alloc(void)
{
	return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
@@ -3851,6 +4238,7 @@ static int its_vpe_init(struct its_vpe *vpe)
		return -ENOMEM;
	}

	raw_spin_lock_init(&vpe->vpe_lock);
	vpe->vpe_id = vpe_id;
	vpe->vpt_page = vpt_page;
	if (gic_rdists->has_rvpeid)

@@ -3960,8 +4348,12 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

-	/* If we use the list map, we issue VMAPP on demand... */
-	if (its_list_map)
	/*
	 * If we use the list map, we issue VMAPP on demand... Unless
	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
	 * so that VSGIs can work.
	 */
	if (!gic_requires_eager_mapping())
		return 0;

	/* Map the VPE to the first possible CPU */
@@ -3987,10 +4379,10 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
	struct its_node *its;

	/*
-	 * If we use the list map, we unmap the VPE once no VLPIs are
-	 * associated with the VM.
	 * If we use the list map on GICv4.0, we unmap the VPE once no
	 * VLPIs are associated with the VM.
	 */
-	if (its_list_map)
	if (!gic_requires_eager_mapping())
		return;

	list_for_each_entry(its, &its_nodes, entry) {
@@ -4404,7 +4796,7 @@ static int __init its_probe_one(struct resource *res,
	struct page *page;
	int err;

-	its_base = ioremap(res->start, resource_size(res));
	its_base = ioremap(res->start, SZ_64K);
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		return -ENOMEM;

@@ -4455,6 +4847,13 @@ static int __init its_probe_one(struct resource *res,
	if (is_v4_1(its)) {
		u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);

		its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
		if (!its->sgir_base) {
			err = -ENOMEM;
			goto out_free_its;
		}

		its->mpidr = readl_relaxed(its_base + GITS_MPIDR);

		pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",

@@ -4468,7 +4867,7 @@ static int __init its_probe_one(struct resource *res,
				get_order(ITS_CMD_QUEUE_SZ));
	if (!page) {
		err = -ENOMEM;
-		goto out_free_its;
		goto out_unmap_sgir;
	}
	its->cmd_base = (void *)page_address(page);
	its->cmd_write = its->cmd_base;
@@ -4535,6 +4934,9 @@ static int __init its_probe_one(struct resource *res,
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
	if (its->sgir_base)
		iounmap(its->sgir_base);
out_free_its:
	kfree(its);
out_unmap:
@@ -4818,6 +5220,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	bool has_v4_1 = false;
	int err;

	gic_rdists = rdists;

@@ -4838,12 +5241,25 @@
	if (err)
		return err;

-	list_for_each_entry(its, &its_nodes, entry)
	list_for_each_entry(its, &its_nodes, entry) {
		has_v4 |= is_v4(its);
		has_v4_1 |= is_v4_1(its);
	}

	/* Don't bother with inconsistent systems */
	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
		rdists->has_rvpeid = false;

	if (has_v4 & rdists->has_vlpis) {
		const struct irq_domain_ops *sgi_ops;

		if (has_v4_1)
			sgi_ops = &its_sgi_domain_ops;
		else
			sgi_ops = NULL;

		if (its_init_vpe_domain() ||
-		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
		    its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
......
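To put the new vSGI plumbing in this file in context, here is a rough usage sketch of how a hypervisor (KVM's GICv4.1 support is the intended consumer) could inject a virtual SGI into a vPE. The wrapper itself is made up; irq_find_mapping(), irq_set_irqchip_state() and its_prop_update_vsgi() are the real interfaces, and the pending write lands in its_sgi_set_irqchip_state() above, which pokes GITS_SGIR.

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/arm-gic-v4.h>

/* Hypothetical glue: inject virtual SGI 'sgi' (0..15) into 'vpe'. */
static int example_inject_vsgi(struct its_vpe *vpe, unsigned int sgi,
			       u8 prio, bool group1)
{
	unsigned int irq = irq_find_mapping(vpe->sgi_domain, sgi);
	int ret;

	if (!irq)
		return -ENOENT;

	/* Propagate priority/group to the ITS-side configuration... */
	ret = its_prop_update_vsgi(irq, prio, group1);
	if (ret)
		return ret;

	/* ...then make the vSGI pending. */
	return irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
}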
@@ -723,6 +723,7 @@ static void __init gic_dist_init(void)
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;
	u32 val;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);

@@ -755,9 +756,14 @@
	/* Now do the common stuff, and wait for the distributor to drain */
	gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);

	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
		pr_info("Enabling SGIs without active state\n");
		val |= GICD_CTLR_nASSGIreq;
	}

	/* Enable distributor with ARE, Group1 */
-	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
-		       base + GICD_CTLR);
	writel_relaxed(val, base + GICD_CTLR);
	/*
	 * Set all global interrupts to the boot CPU only. ARE must be

@@ -828,6 +834,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
	typer = gic_read_typer(ptr + GICR_TYPER);
	if ((typer >> 32) == aff) {
		u64 offset = ptr - region->redist_base;

		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
		gic_data_rdist_rd_base() = ptr;
		gic_data_rdist()->phys_base = region->phys_base + offset;
@@ -1581,7 +1588,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
-	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	gic_data.rdists.has_rvpeid = true;
	gic_data.rdists.has_vlpis = true;

@@ -1592,6 +1598,8 @@
		goto out_free;
	}

	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);

	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
	pr_info("Distributor has %sRange Selector support\n",
		gic_data.has_rss ? "" : "no ");
@@ -1757,6 +1765,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
	gic_v3_kvm_info.vcpu = r;

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	gic_set_kvm_info(&gic_v3_kvm_info);
}

@@ -2072,6 +2081,7 @@ static void __init gic_acpi_setup_kvm_info(void)
	}

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	gic_set_kvm_info(&gic_v3_kvm_info);
}
......
@@ -85,6 +85,53 @@
static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops;
static const struct irq_domain_ops *sgi_domain_ops;
static bool has_v4_1(void)
{
return !!sgi_domain_ops;
}
static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
{
char *name;
int sgi_base;
if (!has_v4_1())
return 0;
name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
if (!name)
goto err;
vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
if (!vpe->fwnode)
goto err;
kfree(name);
name = NULL;
vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
sgi_domain_ops, vpe);
if (!vpe->sgi_domain)
goto err;
sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
NUMA_NO_NODE, vpe,
false, NULL);
if (sgi_base <= 0)
goto err;
return 0;
err:
if (vpe->sgi_domain)
irq_domain_remove(vpe->sgi_domain);
if (vpe->fwnode)
irq_domain_free_fwnode(vpe->fwnode);
kfree(name);
return -ENOMEM;
}
int its_alloc_vcpu_irqs(struct its_vm *vm)
{

@@ -112,8 +159,13 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
	if (vpe_base_irq <= 0)
		goto err;

-	for (i = 0; i < vm->nr_vpes; i++)
	for (i = 0; i < vm->nr_vpes; i++) {
		int ret;

		vm->vpes[i]->irq = vpe_base_irq + i;
		ret = its_alloc_vcpu_sgis(vm->vpes[i], i);
		if (ret)
			goto err;
	}

	return 0;

@@ -126,8 +178,28 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
	return -ENOMEM;
}
static void its_free_sgi_irqs(struct its_vm *vm)
{
int i;
if (!has_v4_1())
return;
for (i = 0; i < vm->nr_vpes; i++) {
unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0);
if (WARN_ON(!irq))
continue;
irq_domain_free_irqs(irq, 16);
irq_domain_remove(vm->vpes[i]->sgi_domain);
irq_domain_free_fwnode(vm->vpes[i]->fwnode);
}
}
void its_free_vcpu_irqs(struct its_vm *vm)
{
	its_free_sgi_irqs(vm);
	irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
	irq_domain_remove(vm->domain);
	irq_domain_free_fwnode(vm->fwnode);

@@ -138,18 +210,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
	return irq_set_vcpu_affinity(vpe->irq, info);
}
-int its_schedule_vpe(struct its_vpe *vpe, bool on)
int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
{
	struct irq_desc *desc = irq_to_desc(vpe->irq);
	struct its_cmd_info info = { };
	int ret;

	WARN_ON(preemptible());

	info.cmd_type = DESCHEDULE_VPE;
	if (has_v4_1()) {
		/* GICv4.1 can directly deal with doorbells */
		info.req_db = db;
	} else {
		/* Undo the nested disable_irq() calls... */
		while (db && irqd_irq_disabled(&desc->irq_data))
			enable_irq(vpe->irq);
	}

	ret = its_send_vpe_cmd(vpe, &info);
	if (!ret)
		vpe->resident = false;

	return ret;
}

int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
{
-	struct its_cmd_info info;
	struct its_cmd_info info = { };
	int ret;

	WARN_ON(preemptible());

-	info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
	info.cmd_type = SCHEDULE_VPE;
	if (has_v4_1()) {
		info.g0en = g0en;
		info.g1en = g1en;
	} else {
		/* Disabled the doorbell, as we're about to enter the guest */
		disable_irq_nosync(vpe->irq);
	}

	ret = its_send_vpe_cmd(vpe, &info);
	if (!ret)
-		vpe->resident = on;
		vpe->resident = true;

	return ret;
}
@@ -216,12 +320,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
	return irq_set_vcpu_affinity(irq, &info);
}
int its_prop_update_vsgi(int irq, u8 priority, bool group)
{
	struct its_cmd_info info = {
		.cmd_type = PROP_UPDATE_VSGI,
		{
			.priority	= priority,
			.group		= group,
		},
	};

	return irq_set_vcpu_affinity(irq, &info);
}

-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
int its_init_v4(struct irq_domain *domain,
		const struct irq_domain_ops *vpe_ops,
		const struct irq_domain_ops *sgi_ops)
{
	if (domain) {
		pr_info("ITS: Enabling GICv4 support\n");
		gic_domain = domain;
-		vpe_domain_ops = ops;
		vpe_domain_ops = vpe_ops;
		sgi_domain_ops = sgi_ops;
		return 0;
	}
......
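The its_schedule_vpe() split above gives callers separate entry points for entering and leaving the guest. A rough sketch of the intended calling pattern, not lifted from KVM: the example_* hooks are made up, while its_make_vpe_resident() and its_make_vpe_non_resident() are the real entry points added here.

#include <linux/irqchip/arm-gic-v4.h>

static void example_vcpu_load(struct its_vpe *vpe)
{
	/* Make the vPE resident; on GICv4.1 also enable Group0/Group1 vSGIs. */
	WARN_ON(its_make_vpe_resident(vpe, true, true));
}

static void example_vcpu_put(struct its_vpe *vpe, bool need_doorbell)
{
	/*
	 * Make the vPE non-resident, asking for a doorbell interrupt if the
	 * guest should be woken up when a vLPI becomes pending.
	 */
	WARN_ON(its_make_vpe_non_resident(vpe, need_doorbell));
}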
@@ -268,15 +268,6 @@ static void init_8259A(int auto_eoi)
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.name = "cascade",
-	.flags = IRQF_NO_THREAD,
-};

static struct resource pic1_io_resource = {
	.name = "pic1",
	.start = PIC_MASTER_CMD,

@@ -311,6 +302,10 @@ static const struct irq_domain_ops i8259A_ops = {
 */
struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
{
	/*
	 * PIC_CASCADE_IR is cascade interrupt to second interrupt controller
	 */
	int irq = I8259A_IRQ_BASE + PIC_CASCADE_IR;
	struct irq_domain *domain;

	insert_resource(&ioport_resource, &pic1_io_resource);

@@ -323,7 +318,8 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
	if (!domain)
		panic("Failed to add i8259 IRQ domain");

-	setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
	if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
		pr_err("Failed to register cascade interrupt\n");
	register_syscore_ops(&i8259_syscore_ops);
	return domain;
}
......
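The i8259 hunk above and the Ingenic one below are part of the tree-wide setup_irq() removal mentioned in the merge description: request_irq() allocates and registers the irqaction itself, so the static struct irqaction and the setup_irq() call go away. A minimal sketch of the conversion pattern with made-up names (bar_cascade, bar_setup_cascade):

#include <linux/interrupt.h>

static irqreturn_t bar_cascade(int irq, void *dev_id)
{
	/* Hypothetical cascade handler. */
	return IRQ_HANDLED;
}

static void bar_setup_cascade(unsigned int irq)
{
	if (request_irq(irq, bar_cascade, IRQF_NO_THREAD, "cascade", NULL))
		pr_err("Failed to register cascade interrupt\n");
}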
@@ -180,3 +180,4 @@ static int __init ingenic_tcu_irq_init(struct device_node *np,
IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init);
@@ -58,11 +58,6 @@ static irqreturn_t intc_cascade(int irq, void *data)
	return IRQ_HANDLED;
}

-static struct irqaction intc_cascade_action = {
-	.handler = intc_cascade,
-	.name = "SoC intc cascade interrupt",
-};

static int __init ingenic_intc_of_init(struct device_node *node,
				       unsigned num_chips)
{

@@ -130,7 +125,9 @@ static int __init ingenic_intc_of_init(struct device_node *node,
		irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
	}

-	setup_irq(parent_irq, &intc_cascade_action);
	if (request_irq(parent_irq, intc_cascade, 0,
			"SoC intc cascade interrupt", NULL))
		pr_err("Failed to register SoC intc cascade interrupt\n");

	return 0;
out_domain_remove:
......
@@ -461,7 +461,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
	}

	i->iomem = devm_ioremap(dev, io[k]->start,
				resource_size(io[k]));
	if (!i->iomem) {
		dev_err(dev, "failed to remap IOMEM\n");
		ret = -ENXIO;
......
@@ -4,6 +4,7 @@
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -55,7 +56,14 @@
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

-static void __iomem *plic_regs;
#define PLIC_DISABLE_THRESHOLD		0xf
#define PLIC_ENABLE_THRESHOLD		0

struct plic_priv {
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
};
struct plic_handler {
	bool			present;

@@ -66,6 +74,7 @@ struct plic_handler {
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	struct plic_priv	*priv;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

@@ -84,31 +93,40 @@ static inline void plic_toggle(struct plic_handler *handler,
}

static inline void plic_irq_toggle(const struct cpumask *mask,
-				   int hwirq, int enable)
				   struct irq_data *d, int enable)
{
	int cpu;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

-	writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
	writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

-		if (handler->present)
-			plic_toggle(handler, hwirq, enable);
		if (handler->present &&
		    cpumask_test_cpu(cpu, &handler->priv->lmask))
			plic_toggle(handler, d->hwirq, enable);
	}
}

static void plic_irq_unmask(struct irq_data *d)
{
-	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
-					   cpu_online_mask);
	struct cpumask amask;
	unsigned int cpu;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	cpumask_and(&amask, &priv->lmask, cpu_online_mask);
	cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
			      &amask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
-	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
	plic_irq_toggle(cpumask_of(cpu), d, 1);
}

static void plic_irq_mask(struct irq_data *d)
{
-	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	plic_irq_toggle(&priv->lmask, d, 0);
}
#ifdef CONFIG_SMP

@@ -116,17 +134,21 @@ static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct cpumask amask;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	cpumask_and(&amask, &priv->lmask, mask_val);

	if (force)
-		cpu = cpumask_first(mask_val);
		cpu = cpumask_first(&amask);
	else
-		cpu = cpumask_any_and(mask_val, cpu_online_mask);
		cpu = cpumask_any_and(&amask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

-	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
-	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
	plic_irq_toggle(&priv->lmask, d, 0);
	plic_irq_toggle(cpumask_of(cpu), d, 1);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -187,8 +209,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
	.free		= irq_domain_free_irqs_top,
};

-static struct irq_domain *plic_irqdomain;
/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing

@@ -205,7 +225,7 @@ static void plic_handle_irq(struct pt_regs *regs)
	csr_clear(CSR_IE, IE_EIE);

	while ((hwirq = readl(claim))) {
-		int irq = irq_find_mapping(plic_irqdomain, hwirq);
		int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);

		if (unlikely(irq <= 0))
			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",

@@ -230,20 +250,48 @@ static int plic_find_hart_id(struct device_node *node)
	return -1;
}
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
/* priority must be > threshold to trigger an interrupt */
writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}
static int plic_dying_cpu(unsigned int cpu)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
csr_clear(CSR_IE, IE_EIE);
plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
return 0;
}
static int plic_starting_cpu(unsigned int cpu)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
csr_set(CSR_IE, IE_EIE);
plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
return 0;
}
static int __init plic_init(struct device_node *node,
		struct device_node *parent)
{
	int error = 0, nr_contexts, nr_handlers = 0, i;
	u32 nr_irqs;
	struct plic_priv *priv;

-	if (plic_regs) {
-		pr_warn("PLIC already present.\n");
-		return -ENXIO;
-	}
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

-	plic_regs = of_iomap(node, 0);
-	if (WARN_ON(!plic_regs))
-		return -EIO;
	priv->regs = of_iomap(node, 0);
	if (WARN_ON(!priv->regs)) {
		error = -EIO;
		goto out_free_priv;
	}

	error = -EINVAL;
	of_property_read_u32(node, "riscv,ndev", &nr_irqs);

@@ -257,9 +305,9 @@ static int __init plic_init(struct device_node *node,
		goto out_iounmap;

	error = -ENOMEM;
-	plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
-			&plic_irqdomain_ops, NULL);
-	if (WARN_ON(!plic_irqdomain))
	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
			&plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain))
		goto out_iounmap;

	for (i = 0; i < nr_contexts; i++) {
...@@ -267,7 +315,6 @@ static int __init plic_init(struct device_node *node, ...@@ -267,7 +315,6 @@ static int __init plic_init(struct device_node *node,
struct plic_handler *handler; struct plic_handler *handler;
irq_hw_number_t hwirq; irq_hw_number_t hwirq;
int cpu, hartid; int cpu, hartid;
u32 threshold = 0;
if (of_irq_parse_one(node, i, &parent)) { if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i); pr_err("failed to parse parent for context %d.\n", i);
...@@ -301,32 +348,36 @@ static int __init plic_init(struct device_node *node, ...@@ -301,32 +348,36 @@ static int __init plic_init(struct device_node *node,
handler = per_cpu_ptr(&plic_handlers, cpu); handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present) { if (handler->present) {
pr_warn("handler already present for context %d.\n", i); pr_warn("handler already present for context %d.\n", i);
threshold = 0xffffffff; plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
goto done; goto done;
} }
cpumask_set_cpu(cpu, &priv->lmask);
handler->present = true; handler->present = true;
handler->hart_base = handler->hart_base =
plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART; priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
raw_spin_lock_init(&handler->enable_lock); raw_spin_lock_init(&handler->enable_lock);
handler->enable_base = handler->enable_base =
plic_regs + ENABLE_BASE + i * ENABLE_PER_HART; priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
handler->priv = priv;
done: done:
/* priority must be > threshold to trigger an interrupt */
writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++) for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
plic_toggle(handler, hwirq, 0); plic_toggle(handler, hwirq, 0);
nr_handlers++; nr_handlers++;
} }
cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
plic_starting_cpu, plic_dying_cpu);
pr_info("mapped %d interrupts with %d handlers for %d contexts.\n", pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
nr_irqs, nr_handlers, nr_contexts); nr_irqs, nr_handlers, nr_contexts);
set_handle_irq(plic_handle_irq); set_handle_irq(plic_handle_irq);
return 0; return 0;
out_iounmap: out_iounmap:
iounmap(plic_regs); iounmap(priv->regs);
out_free_priv:
kfree(priv);
return error; return error;
} }
......
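
The claim/complete protocol described in the comment above works per context: reading the claim register returns and claims the highest-priority pending source, and writing that same ID back completes it. A minimal, hedged sketch for a single context, with the completion write folded into the dispatch loop for clarity (the driver itself may defer completion to an EOI callback):

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

/*
 * Hedged sketch of the claim/complete protocol; "claim" stands for the
 * per-context claim/complete register.
 */
static void plic_dispatch_one_context(void __iomem *claim,
                                      struct irq_domain *domain)
{
        u32 hwirq;

        while ((hwirq = readl(claim))) {
                int irq = irq_find_mapping(domain, hwirq);

                if (irq > 0)
                        generic_handle_irq(irq);

                /* Completing an interrupt = writing its ID back. */
                writel(hwirq, claim);
        }
}
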
...@@ -604,12 +604,24 @@ static void stm32_exti_h_syscore_deinit(void) ...@@ -604,12 +604,24 @@ static void stm32_exti_h_syscore_deinit(void)
unregister_syscore_ops(&stm32_exti_h_syscore_ops); unregister_syscore_ops(&stm32_exti_h_syscore_ops);
} }
static int stm32_exti_h_retrigger(struct irq_data *d)
{
struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
void __iomem *base = chip_data->host_data->base;
u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
writel_relaxed(mask, base + stm32_bank->swier_ofst);
return 0;
}
static struct irq_chip stm32_exti_h_chip = { static struct irq_chip stm32_exti_h_chip = {
.name = "stm32-exti-h", .name = "stm32-exti-h",
.irq_eoi = stm32_exti_h_eoi, .irq_eoi = stm32_exti_h_eoi,
.irq_mask = stm32_exti_h_mask, .irq_mask = stm32_exti_h_mask,
.irq_unmask = stm32_exti_h_unmask, .irq_unmask = stm32_exti_h_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy, .irq_retrigger = stm32_exti_h_retrigger,
.irq_set_type = stm32_exti_h_set_type, .irq_set_type = stm32_exti_h_set_type,
.irq_set_wake = stm32_exti_h_set_wake, .irq_set_wake = stm32_exti_h_set_wake,
.flags = IRQCHIP_MASK_ON_SUSPEND, .flags = IRQCHIP_MASK_ON_SUSPEND,
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/irqchip.h> #include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/versatile-fpga.h> #include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h> #include <linux/irqdomain.h>
#include <linux/module.h> #include <linux/module.h>
...@@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d) ...@@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d)
static void fpga_irq_handle(struct irq_desc *desc) static void fpga_irq_handle(struct irq_desc *desc)
{ {
struct irq_chip *chip = irq_desc_get_chip(desc);
struct fpga_irq_data *f = irq_desc_get_handler_data(desc); struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
u32 status = readl(f->base + IRQ_STATUS); u32 status;
chained_irq_enter(chip, desc);
status = readl(f->base + IRQ_STATUS);
if (status == 0) { if (status == 0) {
do_bad_IRQ(desc); do_bad_IRQ(desc);
return; goto out;
} }
do { do {
...@@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc) ...@@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc)
status &= ~(1 << irq); status &= ~(1 << irq);
generic_handle_irq(irq_find_mapping(f->domain, irq)); generic_handle_irq(irq_find_mapping(f->domain, irq));
} while (status); } while (status);
out:
chained_irq_exit(chip, desc);
} }
/* /*
...@@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node, ...@@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node,
if (of_property_read_u32(node, "valid-mask", &valid_mask)) if (of_property_read_u32(node, "valid-mask", &valid_mask))
valid_mask = 0; valid_mask = 0;
writel(clear_mask, base + IRQ_ENABLE_CLEAR);
writel(clear_mask, base + FIQ_ENABLE_CLEAR);
/* Some chips are cascaded from a parent IRQ */ /* Some chips are cascaded from a parent IRQ */
parent_irq = irq_of_parse_and_map(node, 0); parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) { if (!parent_irq) {
...@@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node, ...@@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node,
fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
writel(clear_mask, base + IRQ_ENABLE_CLEAR);
writel(clear_mask, base + FIQ_ENABLE_CLEAR);
/* /*
* On Versatile AB/PB, some secondary interrupts have a direct * On Versatile AB/PB, some secondary interrupts have a direct
* pass-thru to the primary controller for IRQs 20 and 22-31 which need * pass-thru to the primary controller for IRQs 20 and 22-31 which need
......
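
fpga_irq_handle() now brackets its dispatch loop with chained_irq_enter()/chained_irq_exit(), which notify the parent chip before and after the secondary status register is walked. A minimal sketch of that pattern for a hypothetical cascaded multiplexer (struct my_mux, its status register and domain are illustrative, not the versatile-fpga code):

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

struct my_mux {
        void __iomem *base;             /* hypothetical status register */
        struct irq_domain *domain;
};

static void my_mux_cascade(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct my_mux *mux = irq_desc_get_handler_data(desc);
        unsigned long status;
        int bit;

        chained_irq_enter(chip, desc);  /* tell the parent we are handling its line */

        status = readl(mux->base);      /* pending children */
        for_each_set_bit(bit, &status, 32)
                generic_handle_irq(irq_find_mapping(mux->domain, bit));

        chained_irq_exit(chip, desc);   /* signal completion to the parent */
}
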
...@@ -509,9 +509,7 @@ static int __init vic_of_init(struct device_node *node, ...@@ -509,9 +509,7 @@ static int __init vic_of_init(struct device_node *node,
void __iomem *regs; void __iomem *regs;
u32 interrupt_mask = ~0; u32 interrupt_mask = ~0;
u32 wakeup_mask = ~0; u32 wakeup_mask = ~0;
int parent_irq;
if (WARN(parent, "non-root VICs are not supported"))
return -EINVAL;
regs = of_iomap(node, 0); regs = of_iomap(node, 0);
if (WARN_ON(!regs)) if (WARN_ON(!regs))
...@@ -519,11 +517,14 @@ static int __init vic_of_init(struct device_node *node, ...@@ -519,11 +517,14 @@ static int __init vic_of_init(struct device_node *node,
of_property_read_u32(node, "valid-mask", &interrupt_mask); of_property_read_u32(node, "valid-mask", &interrupt_mask);
of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask); of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
parent_irq = of_irq_get(node, 0);
if (parent_irq < 0)
parent_irq = 0;
/* /*
* Passing 0 as first IRQ makes the simple domain allocate descriptors * Passing 0 as first IRQ makes the simple domain allocate descriptors
*/ */
__vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node); __vic_init(regs, parent_irq, 0, interrupt_mask, wakeup_mask, node);
return 0; return 0;
} }
......
...@@ -38,29 +38,31 @@ struct xintc_irq_chip { ...@@ -38,29 +38,31 @@ struct xintc_irq_chip {
void __iomem *base; void __iomem *base;
struct irq_domain *root_domain; struct irq_domain *root_domain;
u32 intr_mask; u32 intr_mask;
u32 nr_irq;
}; };
static struct xintc_irq_chip *xintc_irqc; static struct xintc_irq_chip *primary_intc;
static void xintc_write(int reg, u32 data) static void xintc_write(struct xintc_irq_chip *irqc, int reg, u32 data)
{ {
if (static_branch_unlikely(&xintc_is_be)) if (static_branch_unlikely(&xintc_is_be))
iowrite32be(data, xintc_irqc->base + reg); iowrite32be(data, irqc->base + reg);
else else
iowrite32(data, xintc_irqc->base + reg); iowrite32(data, irqc->base + reg);
} }
static unsigned int xintc_read(int reg) static u32 xintc_read(struct xintc_irq_chip *irqc, int reg)
{ {
if (static_branch_unlikely(&xintc_is_be)) if (static_branch_unlikely(&xintc_is_be))
return ioread32be(xintc_irqc->base + reg); return ioread32be(irqc->base + reg);
else else
return ioread32(xintc_irqc->base + reg); return ioread32(irqc->base + reg);
} }
static void intc_enable_or_unmask(struct irq_data *d) static void intc_enable_or_unmask(struct irq_data *d)
{ {
unsigned long mask = 1 << d->hwirq; struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
unsigned long mask = BIT(d->hwirq);
pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq); pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
...@@ -69,30 +71,35 @@ static void intc_enable_or_unmask(struct irq_data *d) ...@@ -69,30 +71,35 @@ static void intc_enable_or_unmask(struct irq_data *d)
* acks the irq before calling the interrupt handler * acks the irq before calling the interrupt handler
*/ */
if (irqd_is_level_type(d)) if (irqd_is_level_type(d))
xintc_write(IAR, mask); xintc_write(irqc, IAR, mask);
xintc_write(SIE, mask); xintc_write(irqc, SIE, mask);
} }
static void intc_disable_or_mask(struct irq_data *d) static void intc_disable_or_mask(struct irq_data *d)
{ {
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
pr_debug("irq-xilinx: disable: %ld\n", d->hwirq); pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
xintc_write(CIE, 1 << d->hwirq); xintc_write(irqc, CIE, BIT(d->hwirq));
} }
static void intc_ack(struct irq_data *d) static void intc_ack(struct irq_data *d)
{ {
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
pr_debug("irq-xilinx: ack: %ld\n", d->hwirq); pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
xintc_write(IAR, 1 << d->hwirq); xintc_write(irqc, IAR, BIT(d->hwirq));
} }
static void intc_mask_ack(struct irq_data *d) static void intc_mask_ack(struct irq_data *d)
{ {
unsigned long mask = 1 << d->hwirq; struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
unsigned long mask = BIT(d->hwirq);
pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq); pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
xintc_write(CIE, mask); xintc_write(irqc, CIE, mask);
xintc_write(IAR, mask); xintc_write(irqc, IAR, mask);
} }
static struct irq_chip intc_dev = { static struct irq_chip intc_dev = {
...@@ -103,13 +110,14 @@ static struct irq_chip intc_dev = { ...@@ -103,13 +110,14 @@ static struct irq_chip intc_dev = {
.irq_mask_ack = intc_mask_ack, .irq_mask_ack = intc_mask_ack,
}; };
unsigned int xintc_get_irq(void) static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc)
{ {
unsigned int hwirq, irq = -1; unsigned int irq = 0;
u32 hwirq;
hwirq = xintc_read(IVR); hwirq = xintc_read(irqc, IVR);
if (hwirq != -1U) if (hwirq != -1U)
irq = irq_find_mapping(xintc_irqc->root_domain, hwirq); irq = irq_find_mapping(irqc->root_domain, hwirq);
pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq); pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
...@@ -118,15 +126,18 @@ unsigned int xintc_get_irq(void) ...@@ -118,15 +126,18 @@ unsigned int xintc_get_irq(void)
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{ {
if (xintc_irqc->intr_mask & (1 << hw)) { struct xintc_irq_chip *irqc = d->host_data;
if (irqc->intr_mask & BIT(hw)) {
irq_set_chip_and_handler_name(irq, &intc_dev, irq_set_chip_and_handler_name(irq, &intc_dev,
handle_edge_irq, "edge"); handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL); irq_clear_status_flags(irq, IRQ_LEVEL);
} else { } else {
irq_set_chip_and_handler_name(irq, &intc_dev, irq_set_chip_and_handler_name(irq, &intc_dev,
handle_level_irq, "level"); handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL); irq_set_status_flags(irq, IRQ_LEVEL);
} }
irq_set_chip_data(irq, irqc);
return 0; return 0;
} }
...@@ -138,43 +149,55 @@ static const struct irq_domain_ops xintc_irq_domain_ops = { ...@@ -138,43 +149,55 @@ static const struct irq_domain_ops xintc_irq_domain_ops = {
static void xil_intc_irq_handler(struct irq_desc *desc) static void xil_intc_irq_handler(struct irq_desc *desc)
{ {
struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_chip *chip = irq_desc_get_chip(desc);
struct xintc_irq_chip *irqc;
u32 pending; u32 pending;
irqc = irq_data_get_irq_handler_data(&desc->irq_data);
chained_irq_enter(chip, desc); chained_irq_enter(chip, desc);
do { do {
pending = xintc_get_irq(); pending = xintc_get_irq_local(irqc);
if (pending == -1U) if (pending == 0)
break; break;
generic_handle_irq(pending); generic_handle_irq(pending);
} while (true); } while (true);
chained_irq_exit(chip, desc); chained_irq_exit(chip, desc);
} }
static void xil_intc_handle_irq(struct pt_regs *regs)
{
u32 hwirq;
struct xintc_irq_chip *irqc = primary_intc;
do {
hwirq = xintc_read(irqc, IVR);
if (likely(hwirq != -1U)) {
int ret;
ret = handle_domain_irq(irqc->root_domain, hwirq, regs);
WARN_ONCE(ret, "Unhandled HWIRQ %d\n", hwirq);
continue;
}
break;
} while (1);
}
static int __init xilinx_intc_of_init(struct device_node *intc, static int __init xilinx_intc_of_init(struct device_node *intc,
struct device_node *parent) struct device_node *parent)
{ {
u32 nr_irq;
int ret, irq;
struct xintc_irq_chip *irqc; struct xintc_irq_chip *irqc;
int ret, irq;
if (xintc_irqc) {
pr_err("irq-xilinx: Multiple instances aren't supported\n");
return -EINVAL;
}
irqc = kzalloc(sizeof(*irqc), GFP_KERNEL); irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
if (!irqc) if (!irqc)
return -ENOMEM; return -ENOMEM;
xintc_irqc = irqc;
irqc->base = of_iomap(intc, 0); irqc->base = of_iomap(intc, 0);
BUG_ON(!irqc->base); BUG_ON(!irqc->base);
ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq); ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq);
if (ret < 0) { if (ret < 0) {
pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n"); pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
goto err_alloc; goto error;
} }
ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask); ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
...@@ -183,34 +206,35 @@ static int __init xilinx_intc_of_init(struct device_node *intc, ...@@ -183,34 +206,35 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
irqc->intr_mask = 0; irqc->intr_mask = 0;
} }
if (irqc->intr_mask >> nr_irq) if (irqc->intr_mask >> irqc->nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n", pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
intc, nr_irq, irqc->intr_mask); intc, irqc->nr_irq, irqc->intr_mask);
/* /*
* Disable all external interrupts until they are * Disable all external interrupts until they are
* explicitly requested. * explicitly requested.
 * explicitly requested. * explicitly requested.
*/ */
xintc_write(IER, 0); xintc_write(irqc, IER, 0);
/* Acknowledge any pending interrupts just in case. */ /* Acknowledge any pending interrupts just in case. */
xintc_write(IAR, 0xffffffff); xintc_write(irqc, IAR, 0xffffffff);
/* Turn on the Master Enable. */ /* Turn on the Master Enable. */
xintc_write(MER, MER_HIE | MER_ME); xintc_write(irqc, MER, MER_HIE | MER_ME);
if (!(xintc_read(MER) & (MER_HIE | MER_ME))) { if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) {
static_branch_enable(&xintc_is_be); static_branch_enable(&xintc_is_be);
xintc_write(MER, MER_HIE | MER_ME); xintc_write(irqc, MER, MER_HIE | MER_ME);
} }
irqc->root_domain = irq_domain_add_linear(intc, nr_irq, irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
&xintc_irq_domain_ops, irqc); &xintc_irq_domain_ops, irqc);
if (!irqc->root_domain) { if (!irqc->root_domain) {
pr_err("irq-xilinx: Unable to create IRQ domain\n"); pr_err("irq-xilinx: Unable to create IRQ domain\n");
goto err_alloc; ret = -EINVAL;
goto error;
} }
if (parent) { if (parent) {
...@@ -222,16 +246,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc, ...@@ -222,16 +246,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
} else { } else {
pr_err("irq-xilinx: interrupts property not in DT\n"); pr_err("irq-xilinx: interrupts property not in DT\n");
ret = -EINVAL; ret = -EINVAL;
goto err_alloc; goto error;
} }
} else { } else {
irq_set_default_host(irqc->root_domain); primary_intc = irqc;
set_handle_irq(xil_intc_handle_irq);
} }
return 0; return 0;
err_alloc: error:
xintc_irqc = NULL; iounmap(irqc->base);
kfree(irqc); kfree(irqc);
return ret; return ret;
......
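
With per-instance state in place, the driver distinguishes the primary controller, which becomes the CPU's root handler via set_handle_irq() (matching the MicroBlaze do_IRQ() switch to handle_arch_irq), from cascaded instances, which are dispatched from a parent interrupt as chained handlers. A condensed, hedged sketch of the two registration paths; the irq_of_parse_and_map() lookup and the omitted error handling are assumptions, not copied from the hunk above:

        if (parent) {
                int irq = irq_of_parse_and_map(intc, 0);        /* assumed lookup */

                /* Cascaded instance: run from the parent's flow handler. */
                irq_set_chained_handler_and_data(irq, xil_intc_irq_handler, irqc);
        } else {
                /* Primary instance: own the architecture's external IRQ entry. */
                primary_intc = irqc;
                set_handle_irq(xil_intc_handle_irq);
        }
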
...@@ -33,7 +33,7 @@ struct combiner { ...@@ -33,7 +33,7 @@ struct combiner {
int parent_irq; int parent_irq;
u32 nirqs; u32 nirqs;
u32 nregs; u32 nregs;
struct combiner_reg regs[0]; struct combiner_reg regs[];
}; };
static inline int irq_nr(u32 reg, u32 bit) static inline int irq_nr(u32 reg, u32 bit)
......
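
The zero-length regs[0] array becomes a C99 flexible array member; an allocation sizing it would typically use struct_size() so the header and the nregs trailing elements are accounted for in one overflow-checked expression. A one-line hedged sketch (nregs is whatever the probe code computed, not a value taken from this hunk):

#include <linux/overflow.h>
#include <linux/slab.h>

        struct combiner *combiner = kzalloc(struct_size(combiner, regs, nregs),
                                            GFP_KERNEL);
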
...@@ -92,6 +92,7 @@ struct stm32_gpio_bank { ...@@ -92,6 +92,7 @@ struct stm32_gpio_bank {
u32 bank_nr; u32 bank_nr;
u32 bank_ioport_nr; u32 bank_ioport_nr;
u32 pin_backup[STM32_GPIO_PINS_PER_BANK]; u32 pin_backup[STM32_GPIO_PINS_PER_BANK];
u8 irq_type[STM32_GPIO_PINS_PER_BANK];
}; };
struct stm32_pinctrl { struct stm32_pinctrl {
...@@ -303,6 +304,50 @@ static const struct gpio_chip stm32_gpio_template = { ...@@ -303,6 +304,50 @@ static const struct gpio_chip stm32_gpio_template = {
.get_direction = stm32_gpio_get_direction, .get_direction = stm32_gpio_get_direction,
}; };
static void stm32_gpio_irq_trigger(struct irq_data *d)
{
struct stm32_gpio_bank *bank = d->domain->host_data;
int level;
/* If level interrupt type then retrig */
level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
(level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
irq_chip_retrigger_hierarchy(d);
}
static void stm32_gpio_irq_eoi(struct irq_data *d)
{
irq_chip_eoi_parent(d);
stm32_gpio_irq_trigger(d);
};
static int stm32_gpio_set_type(struct irq_data *d, unsigned int type)
{
struct stm32_gpio_bank *bank = d->domain->host_data;
u32 parent_type;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
parent_type = type;
break;
case IRQ_TYPE_LEVEL_HIGH:
parent_type = IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_LEVEL_LOW:
parent_type = IRQ_TYPE_EDGE_FALLING;
break;
default:
return -EINVAL;
}
bank->irq_type[d->hwirq] = type;
return irq_chip_set_type_parent(d, parent_type);
};
static int stm32_gpio_irq_request_resources(struct irq_data *irq_data) static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
{ {
struct stm32_gpio_bank *bank = irq_data->domain->host_data; struct stm32_gpio_bank *bank = irq_data->domain->host_data;
...@@ -330,13 +375,19 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data) ...@@ -330,13 +375,19 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq); gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
} }
static void stm32_gpio_irq_unmask(struct irq_data *d)
{
irq_chip_unmask_parent(d);
stm32_gpio_irq_trigger(d);
}
static struct irq_chip stm32_gpio_irq_chip = { static struct irq_chip stm32_gpio_irq_chip = {
.name = "stm32gpio", .name = "stm32gpio",
.irq_eoi = irq_chip_eoi_parent, .irq_eoi = stm32_gpio_irq_eoi,
.irq_ack = irq_chip_ack_parent, .irq_ack = irq_chip_ack_parent,
.irq_mask = irq_chip_mask_parent, .irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent, .irq_unmask = stm32_gpio_irq_unmask,
.irq_set_type = irq_chip_set_type_parent, .irq_set_type = stm32_gpio_set_type,
.irq_set_wake = irq_chip_set_wake_parent, .irq_set_wake = irq_chip_set_wake_parent,
.irq_request_resources = stm32_gpio_irq_request_resources, .irq_request_resources = stm32_gpio_irq_request_resources,
.irq_release_resources = stm32_gpio_irq_release_resources, .irq_release_resources = stm32_gpio_irq_release_resources,
......
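
The EXTI parent of these GPIO banks only latches edges, so level semantics are emulated: stm32_gpio_set_type() records the requested level type but programs the matching edge on the parent, and after every eoi/unmask the pin level is re-read and the parent retriggered while the line stays asserted. Consumers simply request a level type as usual; a hedged usage sketch (my_isr, my_dev and the GPIO descriptor are illustrative):

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

        int irq = gpiod_to_irq(gpiod);

        ret = request_irq(irq, my_isr, IRQF_TRIGGER_LOW, "my-dev", my_dev);
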
...@@ -70,6 +70,7 @@ struct vgic_global { ...@@ -70,6 +70,7 @@ struct vgic_global {
/* Hardware has GICv4? */ /* Hardware has GICv4? */
bool has_gicv4; bool has_gicv4;
bool has_gicv4_1;
/* GIC system register CPU interface */ /* GIC system register CPU interface */
struct static_key_false gicv3_cpuif; struct static_key_false gicv3_cpuif;
......
...@@ -102,6 +102,7 @@ enum cpuhp_state { ...@@ -102,6 +102,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_MICROCODE_LOADER, CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
......
...@@ -32,6 +32,8 @@ struct gic_kvm_info { ...@@ -32,6 +32,8 @@ struct gic_kvm_info {
struct resource vctrl; struct resource vctrl;
/* vlpi support */ /* vlpi support */
bool has_v4; bool has_v4;
/* rvpeid support */
bool has_v4_1;
}; };
const struct gic_kvm_info *gic_get_kvm_info(void); const struct gic_kvm_info *gic_get_kvm_info(void);
......
...@@ -57,6 +57,7 @@ ...@@ -57,6 +57,7 @@
#define GICD_SPENDSGIR 0x0F20 #define GICD_SPENDSGIR 0x0F20
#define GICD_CTLR_RWP (1U << 31) #define GICD_CTLR_RWP (1U << 31)
#define GICD_CTLR_nASSGIreq (1U << 8)
#define GICD_CTLR_DS (1U << 6) #define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4) #define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1) #define GICD_CTLR_ENABLE_G1A (1U << 1)
...@@ -90,6 +91,7 @@ ...@@ -90,6 +91,7 @@
#define GICD_TYPER_ESPIS(typer) \ #define GICD_TYPER_ESPIS(typer) \
(((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0) (((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
#define GICD_TYPER2_nASSGIcap (1U << 8)
#define GICD_TYPER2_VIL (1U << 7) #define GICD_TYPER2_VIL (1U << 7)
#define GICD_TYPER2_VID GENMASK(4, 0) #define GICD_TYPER2_VID GENMASK(4, 0)
...@@ -320,6 +322,9 @@ ...@@ -320,6 +322,9 @@
#define GICR_VPENDBASER_NonShareable \ #define GICR_VPENDBASER_NonShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
#define GICR_VPENDBASER_InnerShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable)
#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) #define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) #define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) #define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
...@@ -343,6 +348,15 @@ ...@@ -343,6 +348,15 @@
#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58) #define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58)
#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0) #define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0)
#define GICR_VSGIR 0x0080
#define GICR_VSGIR_VPEID GENMASK(15, 0)
#define GICR_VSGIPENDR 0x0088
#define GICR_VSGIPENDR_BUSY (1U << 31)
#define GICR_VSGIPENDR_PENDING GENMASK(15, 0)
/* /*
* ITS registers, offsets from ITS_base * ITS registers, offsets from ITS_base
*/ */
...@@ -366,6 +380,11 @@ ...@@ -366,6 +380,11 @@
#define GITS_TRANSLATER 0x10040 #define GITS_TRANSLATER 0x10040
#define GITS_SGIR 0x20020
#define GITS_SGIR_VPEID GENMASK_ULL(47, 32)
#define GITS_SGIR_VINTID GENMASK_ULL(3, 0)
#define GITS_CTLR_ENABLE (1U << 0) #define GITS_CTLR_ENABLE (1U << 0)
#define GITS_CTLR_ImDe (1U << 1) #define GITS_CTLR_ImDe (1U << 1)
#define GITS_CTLR_ITS_NUMBER_SHIFT 4 #define GITS_CTLR_ITS_NUMBER_SHIFT 4
...@@ -500,8 +519,9 @@ ...@@ -500,8 +519,9 @@
#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) #define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) #define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) #define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
/* VMOVP and INVDB are the odd ones, as they don't have a physical counterpart */ /* VMOVP, VSGI and INVDB are the odd ones, as they don't have a physical counterpart */
#define GITS_CMD_VMOVP GITS_CMD_GICv4(2) #define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
#define GITS_CMD_VSGI GITS_CMD_GICv4(3)
#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe) #define GITS_CMD_INVDB GITS_CMD_GICv4(0xe)
/* /*
...@@ -650,6 +670,7 @@ ...@@ -650,6 +670,7 @@
struct rdists { struct rdists {
struct { struct {
raw_spinlock_t rd_lock;
void __iomem *rd_base; void __iomem *rd_base;
struct page *pend_page; struct page *pend_page;
phys_addr_t phys_base; phys_addr_t phys_base;
......
...@@ -49,10 +49,22 @@ struct its_vpe { ...@@ -49,10 +49,22 @@ struct its_vpe {
}; };
/* GICv4.1 implementations */ /* GICv4.1 implementations */
struct { struct {
struct fwnode_handle *fwnode;
struct irq_domain *sgi_domain;
struct {
u8 priority;
bool enabled;
bool group;
} sgi_config[16];
atomic_t vmapp_count; atomic_t vmapp_count;
}; };
}; };
/*
* Ensures mutual exclusion between affinity setting of the
* vPE and vLPI operations using vpe->col_idx.
*/
raw_spinlock_t vpe_lock;
/* /*
* This collection ID is used to indirect the target * This collection ID is used to indirect the target
* redistributor for this VPE. The ID itself isn't involved in * redistributor for this VPE. The ID itself isn't involved in
...@@ -93,6 +105,7 @@ enum its_vcpu_info_cmd_type { ...@@ -93,6 +105,7 @@ enum its_vcpu_info_cmd_type {
SCHEDULE_VPE, SCHEDULE_VPE,
DESCHEDULE_VPE, DESCHEDULE_VPE,
INVALL_VPE, INVALL_VPE,
PROP_UPDATE_VSGI,
}; };
struct its_cmd_info { struct its_cmd_info {
...@@ -105,19 +118,27 @@ struct its_cmd_info { ...@@ -105,19 +118,27 @@ struct its_cmd_info {
bool g0en; bool g0en;
bool g1en; bool g1en;
}; };
struct {
u8 priority;
bool group;
};
}; };
}; };
int its_alloc_vcpu_irqs(struct its_vm *vm); int its_alloc_vcpu_irqs(struct its_vm *vm);
void its_free_vcpu_irqs(struct its_vm *vm); void its_free_vcpu_irqs(struct its_vm *vm);
int its_schedule_vpe(struct its_vpe *vpe, bool on); int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
int its_invall_vpe(struct its_vpe *vpe); int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map); int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map); int its_get_vlpi(int irq, struct its_vlpi_map *map);
int its_unmap_vlpi(int irq); int its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv); int its_prop_update_vlpi(int irq, u8 config, bool inv);
int its_prop_update_vsgi(int irq, u8 priority, bool group);
struct irq_domain_ops; struct irq_domain_ops;
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops); int its_init_v4(struct irq_domain *domain,
const struct irq_domain_ops *vpe_ops,
const struct irq_domain_ops *sgi_ops);
#endif #endif
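
The new vpe_lock documents its own purpose: vLPI operations consume vpe->col_idx, and an affinity change on the vPE can retarget that column concurrently. A hedged sketch of the exclusion the comment describes (the body of the operation is hypothetical):

static void issue_vlpi_op(struct its_vpe *vpe)
{
        raw_spin_lock(&vpe->vpe_lock);
        /*
         * vpe->col_idx cannot change while the lock is held; build and
         * queue the vLPI command against it here (hypothetical body).
         */
        raw_spin_unlock(&vpe->vpe_lock);
}
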
...@@ -595,7 +595,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info) ...@@ -595,7 +595,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
/* GICv4 support? */ /* GICv4 support? */
if (info->has_v4) { if (info->has_v4) {
kvm_vgic_global_state.has_gicv4 = gicv4_enable; kvm_vgic_global_state.has_gicv4 = gicv4_enable;
kvm_info("GICv4 support %sabled\n", kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
kvm_info("GICv4%s support %sabled\n",
kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
gicv4_enable ? "en" : "dis"); gicv4_enable ? "en" : "dis");
} }
......
...@@ -67,10 +67,10 @@ ...@@ -67,10 +67,10 @@
* it. And if we've migrated our vcpu from one CPU to another, we must * it. And if we've migrated our vcpu from one CPU to another, we must
* tell the ITS (so that the messages reach the right redistributor). * tell the ITS (so that the messages reach the right redistributor).
* This is done in two steps: first issue a irq_set_affinity() on the * This is done in two steps: first issue a irq_set_affinity() on the
* irq corresponding to the vcpu, then call its_schedule_vpe(). You * irq corresponding to the vcpu, then call its_make_vpe_resident().
* must be in a non-preemptible context. On exit, another call to * You must be in a non-preemptible context. On exit, a call to
* its_schedule_vpe() tells the redistributor that we're done with the * its_make_vpe_non_resident() tells the redistributor that we're done
* vcpu. * with the vcpu.
* *
* Finally, the doorbell handling: Each vcpu is allocated an interrupt * Finally, the doorbell handling: Each vcpu is allocated an interrupt
* which will fire each time a VLPI is made pending whilst the vcpu is * which will fire each time a VLPI is made pending whilst the vcpu is
...@@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info) ...@@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
struct kvm_vcpu *vcpu = info; struct kvm_vcpu *vcpu = info;
/* We got the message, no need to fire again */ /* We got the message, no need to fire again */
if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data)) if (!kvm_vgic_global_state.has_gicv4_1 &&
!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
disable_irq_nosync(irq); disable_irq_nosync(irq);
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true; vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
...@@ -199,19 +200,11 @@ void vgic_v4_teardown(struct kvm *kvm) ...@@ -199,19 +200,11 @@ void vgic_v4_teardown(struct kvm *kvm)
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db) int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{ {
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
struct irq_desc *desc = irq_to_desc(vpe->irq);
if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident) if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
return 0; return 0;
/* return its_make_vpe_non_resident(vpe, need_db);
* If blocking, a doorbell is required. Undo the nested
* disable_irq() calls...
*/
while (need_db && irqd_irq_disabled(&desc->irq_data))
enable_irq(vpe->irq);
return its_schedule_vpe(vpe, false);
} }
int vgic_v4_load(struct kvm_vcpu *vcpu) int vgic_v4_load(struct kvm_vcpu *vcpu)
...@@ -232,18 +225,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu) ...@@ -232,18 +225,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
if (err) if (err)
return err; return err;
/* Disabled the doorbell, as we're about to enter the guest */ err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
disable_irq_nosync(vpe->irq);
err = its_schedule_vpe(vpe, true);
if (err) if (err)
return err; return err;
/* /*
* Now that the VPE is resident, let's get rid of a potential * Now that the VPE is resident, let's get rid of a potential
* doorbell interrupt that would still be pending. * doorbell interrupt that would still be pending. This is a
* GICv4.0 only "feature"...
*/ */
return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false); if (!kvm_vgic_global_state.has_gicv4_1)
err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
return err;
} }
static struct vgic_its *vgic_get_its(struct kvm *kvm, static struct vgic_its *vgic_get_its(struct kvm *kvm,
......
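
The comment near the top of this file lays out the residency protocol: move the vPE's doorbell interrupt to the target CPU with irq_set_affinity(), make the vPE resident, run the guest, then make it non-resident on exit, all from a non-preemptible context. A hedged sketch of that sequence using the signatures declared above (the group-enable arguments and error handling are simplified):

static void run_vcpu_once(struct its_vpe *vpe, bool need_db)
{
        preempt_disable();

        /* Route the vPE's doorbell at the CPU we are about to run on. */
        WARN_ON(irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id())));

        /* Make the vPE resident; GICv4.1 honours the group enables. */
        its_make_vpe_resident(vpe, false, true);

        /* ... enter the guest ... */

        /* Done with the vcpu; request a doorbell only if we will block. */
        its_make_vpe_non_resident(vpe, need_db);

        preempt_enable();
}
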