Commit 0097852c authored by Thomas Gleixner's avatar Thomas Gleixner

Merge tag 'irqchip-for-4.7' of...

Merge tag 'irqchip-for-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core

Pull irqchip updates for Linux 4.7 from Marc Zyngier

- Layerscape SCFG MSI controller support
- LPC32xx interrupt controller support
- RPi irqchip support on arm64
- GICv2 cleanup
- GICv2 and GICv3 bug fixes
parents 9d9b7eed a1dcbd11
* Freescale Layerscape SCFG PCIe MSI controller
Required properties:
- compatible: should be "fsl,<soc-name>-msi" to identify
Layerscape PCIe MSI controller block such as:
"fsl,1s1021a-msi"
"fsl,1s1043a-msi"
- msi-controller: indicates that this is a PCIe MSI controller node
- reg: physical base address of the controller and length of memory mapped.
- interrupts: an interrupt to the parent interrupt controller.
Optional properties:
- interrupt-parent: the phandle to the parent interrupt controller.
This interrupt controller hardware is a second level interrupt controller that
is hooked to a parent interrupt controller: e.g: ARM GIC for ARM-based
platforms. If interrupt-parent is not provided, the default parent interrupt
controller will be used.
Each PCIe node needs to have property msi-parent that points to
MSI controller node
Examples:
msi1: msi-controller@1571000 {
compatible = "fsl,1s1043a-msi";
reg = <0x0 0x1571000 0x0 0x8>;
msi-controller;
interrupts = <0 116 0x4>;
};
...@@ -531,6 +531,8 @@ config ARCH_LPC32XX ...@@ -531,6 +531,8 @@ config ARCH_LPC32XX
select COMMON_CLK select COMMON_CLK
select CPU_ARM926T select CPU_ARM926T
select GENERIC_CLOCKEVENTS select GENERIC_CLOCKEVENTS
select MULTI_IRQ_HANDLER
select SPARSE_IRQ
select USE_OF select USE_OF
help help
Support for the NXP LPC32XX family of processors Support for the NXP LPC32XX family of processors
......
...@@ -206,7 +206,6 @@ static const char *const lpc32xx_dt_compat[] __initconst = { ...@@ -206,7 +206,6 @@ static const char *const lpc32xx_dt_compat[] __initconst = {
DT_MACHINE_START(LPC32XX_DT, "LPC32XX SoC (Flattened Device Tree)") DT_MACHINE_START(LPC32XX_DT, "LPC32XX SoC (Flattened Device Tree)")
.atag_offset = 0x100, .atag_offset = 0x100,
.map_io = lpc32xx_map_io, .map_io = lpc32xx_map_io,
.init_irq = lpc32xx_init_irq,
.init_machine = lpc3250_machine_init, .init_machine = lpc3250_machine_init,
.dt_compat = lpc32xx_dt_compat, .dt_compat = lpc32xx_dt_compat,
MACHINE_END MACHINE_END
...@@ -246,5 +246,10 @@ config MVEBU_ODMI ...@@ -246,5 +246,10 @@ config MVEBU_ODMI
bool bool
select GENERIC_MSI_IRQ_DOMAIN select GENERIC_MSI_IRQ_DOMAIN
config LS_SCFG_MSI
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
depends on PCI && PCI_MSI
select PCI_MSI_IRQ_DOMAIN
config PARTITION_PERCPU config PARTITION_PERCPU
bool bool
...@@ -7,6 +7,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o ...@@ -7,6 +7,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
obj-$(CONFIG_ARCH_MMP) += irq-mmp.o obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
obj-$(CONFIG_IRQ_MXS) += irq-mxs.o obj-$(CONFIG_IRQ_MXS) += irq-mxs.o
obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o
...@@ -66,3 +67,4 @@ obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o ...@@ -66,3 +67,4 @@ obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
...@@ -195,7 +195,7 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask, ...@@ -195,7 +195,7 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
* Ensure that stores to normal memory are visible to the * Ensure that stores to normal memory are visible to the
* other CPUs before issuing the IPI. * other CPUs before issuing the IPI.
*/ */
dsb(); smp_wmb();
for_each_cpu(cpu, mask) { for_each_cpu(cpu, mask) {
writel(1 << ipi, mailbox0_base + 16 * cpu); writel(1 << ipi, mailbox0_base + 16 * cpu);
...@@ -223,6 +223,7 @@ static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = { ...@@ -223,6 +223,7 @@ static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
.priority = 100, .priority = 100,
}; };
#ifdef CONFIG_ARM
int __init bcm2836_smp_boot_secondary(unsigned int cpu, int __init bcm2836_smp_boot_secondary(unsigned int cpu,
struct task_struct *idle) struct task_struct *idle)
{ {
...@@ -238,7 +239,7 @@ int __init bcm2836_smp_boot_secondary(unsigned int cpu, ...@@ -238,7 +239,7 @@ int __init bcm2836_smp_boot_secondary(unsigned int cpu,
static const struct smp_operations bcm2836_smp_ops __initconst = { static const struct smp_operations bcm2836_smp_ops __initconst = {
.smp_boot_secondary = bcm2836_smp_boot_secondary, .smp_boot_secondary = bcm2836_smp_boot_secondary,
}; };
#endif
#endif #endif
static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = { static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
...@@ -252,12 +253,15 @@ bcm2836_arm_irqchip_smp_init(void) ...@@ -252,12 +253,15 @@ bcm2836_arm_irqchip_smp_init(void)
/* Unmask IPIs to the boot CPU. */ /* Unmask IPIs to the boot CPU. */
bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier, bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
CPU_STARTING, CPU_STARTING,
(void *)smp_processor_id()); (void *)(uintptr_t)smp_processor_id());
register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier); register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
set_smp_cross_call(bcm2836_arm_irqchip_send_ipi); set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
#ifdef CONFIG_ARM
smp_set_ops(&bcm2836_smp_ops); smp_set_ops(&bcm2836_smp_ops);
#endif #endif
#endif
} }
/* /*
......
...@@ -183,7 +183,7 @@ static int crossbar_domain_translate(struct irq_domain *d, ...@@ -183,7 +183,7 @@ static int crossbar_domain_translate(struct irq_domain *d,
return -EINVAL; return -EINVAL;
*hwirq = fwspec->param[1]; *hwirq = fwspec->param[1];
*type = fwspec->param[2]; *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
return 0; return 0;
} }
......
...@@ -50,14 +50,26 @@ int gic_configure_irq(unsigned int irq, unsigned int type, ...@@ -50,14 +50,26 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
else if (type & IRQ_TYPE_EDGE_BOTH) else if (type & IRQ_TYPE_EDGE_BOTH)
val |= confmask; val |= confmask;
/* If the current configuration is the same, then we are done */
if (val == oldval)
return 0;
/* /*
* Write back the new configuration, and possibly re-enable * Write back the new configuration, and possibly re-enable
* the interrupt. If we tried to write a new configuration and failed, * the interrupt. If we fail to write a new configuration for
* return an error. * an SPI then WARN and return an error. If we fail to write the
* configuration for a PPI this is most likely because the GIC
* does not allow us to set the configuration or we are in a
* non-secure mode, and hence it may not be catastrophic.
*/ */
writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval) if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val) {
if (WARN_ON(irq >= 32))
ret = -EINVAL; ret = -EINVAL;
else
pr_warn("GIC: PPI%d is secure or misconfigured\n",
irq - 16);
}
if (sync_access) if (sync_access)
sync_access(); sync_access();
......
...@@ -49,6 +49,9 @@ ...@@ -49,6 +49,9 @@
/* APM X-Gene with GICv2m MSI_IIDR register value */ /* APM X-Gene with GICv2m MSI_IIDR register value */
#define XGENE_GICV2M_MSI_IIDR 0x06000170 #define XGENE_GICV2M_MSI_IIDR 0x06000170
/* Broadcom NS2 GICv2m MSI_IIDR register value */
#define BCM_NS2_GICV2M_MSI_IIDR 0x0000013f
/* List of flags for specific v2m implementation */ /* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET 0x00000001 #define GICV2M_NEEDS_SPI_OFFSET 0x00000001
...@@ -62,6 +65,7 @@ struct v2m_data { ...@@ -62,6 +65,7 @@ struct v2m_data {
void __iomem *base; /* GICv2m virt address */ void __iomem *base; /* GICv2m virt address */
u32 spi_start; /* The SPI number that MSIs start */ u32 spi_start; /* The SPI number that MSIs start */
u32 nr_spis; /* The number of SPIs for MSIs */ u32 nr_spis; /* The number of SPIs for MSIs */
u32 spi_offset; /* offset to be subtracted from SPI number */
unsigned long *bm; /* MSI vector bitmap */ unsigned long *bm; /* MSI vector bitmap */
u32 flags; /* v2m flags for specific implementation */ u32 flags; /* v2m flags for specific implementation */
}; };
...@@ -102,7 +106,7 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) ...@@ -102,7 +106,7 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq; msg->data = data->hwirq;
if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET) if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
msg->data -= v2m->spi_start; msg->data -= v2m->spi_offset;
} }
static struct irq_chip gicv2m_irq_chip = { static struct irq_chip gicv2m_irq_chip = {
...@@ -340,9 +344,20 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode, ...@@ -340,9 +344,20 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
* different from the standard GICv2m implementation where * different from the standard GICv2m implementation where
* the MSI data is the absolute value within the range from * the MSI data is the absolute value within the range from
* spi_start to (spi_start + num_spis). * spi_start to (spi_start + num_spis).
*
* Broadcom NS2 GICv2m implementation has an erratum where the MSI data
* is 'spi_number - 32'
*/ */
if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR) switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
case XGENE_GICV2M_MSI_IIDR:
v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
v2m->spi_offset = v2m->spi_start;
break;
case BCM_NS2_GICV2M_MSI_IIDR:
v2m->flags |= GICV2M_NEEDS_SPI_OFFSET; v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
v2m->spi_offset = 32;
break;
}
v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis), v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
GFP_KERNEL); GFP_KERNEL);
......
...@@ -54,6 +54,16 @@ struct its_collection { ...@@ -54,6 +54,16 @@ struct its_collection {
u16 col_id; u16 col_id;
}; };
/*
* The ITS_BASER structure - contains memory information and cached
* value of BASER register configuration.
*/
struct its_baser {
void *base;
u64 val;
u32 order;
};
/* /*
* The ITS structure - contains most of the infrastructure, with the * The ITS structure - contains most of the infrastructure, with the
* top-level MSI domain, the command queue, the collections, and the * top-level MSI domain, the command queue, the collections, and the
...@@ -66,14 +76,12 @@ struct its_node { ...@@ -66,14 +76,12 @@ struct its_node {
unsigned long phys_base; unsigned long phys_base;
struct its_cmd_block *cmd_base; struct its_cmd_block *cmd_base;
struct its_cmd_block *cmd_write; struct its_cmd_block *cmd_write;
struct { struct its_baser tables[GITS_BASER_NR_REGS];
void *base;
u32 order;
} tables[GITS_BASER_NR_REGS];
struct its_collection *collections; struct its_collection *collections;
struct list_head its_device_list; struct list_head its_device_list;
u64 flags; u64 flags;
u32 ite_size; u32 ite_size;
u32 device_ids;
}; };
#define ITS_ITT_ALIGN SZ_256 #define ITS_ITT_ALIGN SZ_256
...@@ -838,6 +846,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its) ...@@ -838,6 +846,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
ids = GITS_TYPER_DEVBITS(typer); ids = GITS_TYPER_DEVBITS(typer);
} }
its->device_ids = ids;
for (i = 0; i < GITS_BASER_NR_REGS; i++) { for (i = 0; i < GITS_BASER_NR_REGS; i++) {
u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
u64 type = GITS_BASER_TYPE(val); u64 type = GITS_BASER_TYPE(val);
...@@ -913,6 +923,7 @@ static int its_alloc_tables(const char *node_name, struct its_node *its) ...@@ -913,6 +923,7 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
} }
val |= alloc_pages - 1; val |= alloc_pages - 1;
its->tables[i].val = val;
writeq_relaxed(val, its->base + GITS_BASER + i * 8); writeq_relaxed(val, its->base + GITS_BASER + i * 8);
tmp = readq_relaxed(its->base + GITS_BASER + i * 8); tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
...@@ -1138,9 +1149,22 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id) ...@@ -1138,9 +1149,22 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
return its_dev; return its_dev;
} }
static struct its_baser *its_get_baser(struct its_node *its, u32 type)
{
int i;
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
if (GITS_BASER_TYPE(its->tables[i].val) == type)
return &its->tables[i];
}
return NULL;
}
static struct its_device *its_create_device(struct its_node *its, u32 dev_id, static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nvecs) int nvecs)
{ {
struct its_baser *baser;
struct its_device *dev; struct its_device *dev;
unsigned long *lpi_map; unsigned long *lpi_map;
unsigned long flags; unsigned long flags;
...@@ -1151,6 +1175,16 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, ...@@ -1151,6 +1175,16 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nr_ites; int nr_ites;
int sz; int sz;
baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
/* Don't allow 'dev_id' that exceeds single, flat table limit */
if (baser) {
if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) /
GITS_BASER_ENTRY_SIZE(baser->val)))
return NULL;
} else if (ilog2(dev_id) >= its->device_ids)
return NULL;
dev = kzalloc(sizeof(*dev), GFP_KERNEL); dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/* /*
* At least one bit of EventID is being used, hence a minimum * At least one bit of EventID is being used, hence a minimum
......
...@@ -367,6 +367,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs ...@@ -367,6 +367,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
if (static_key_true(&supports_deactivate)) if (static_key_true(&supports_deactivate))
gic_write_dir(irqnr); gic_write_dir(irqnr);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/*
* Unlike GICv2, we don't need an smp_rmb() here.
* The control dependency from gic_read_iar to
* the ISB in gic_write_eoir is enough to ensure
* that any shared data read by handle_IPI will
* be read after the ACK.
*/
handle_IPI(irqnr, regs); handle_IPI(irqnr, regs);
#else #else
WARN_ONCE(true, "Unexpected SGI received!\n"); WARN_ONCE(true, "Unexpected SGI received!\n");
...@@ -386,6 +393,15 @@ static void __init gic_dist_init(void) ...@@ -386,6 +393,15 @@ static void __init gic_dist_init(void)
writel_relaxed(0, base + GICD_CTLR); writel_relaxed(0, base + GICD_CTLR);
gic_dist_wait_for_rwp(); gic_dist_wait_for_rwp();
/*
* Configure SPIs as non-secure Group-1. This will only matter
* if the GIC only has a single security state. This will not
* do the right thing if the kernel is running in secure mode,
* but that's not the intended use case anyway.
*/
for (i = 32; i < gic_data.irq_nr; i += 32)
writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp); gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
/* Enable distributor with ARE, Group1 */ /* Enable distributor with ARE, Group1 */
...@@ -503,6 +519,9 @@ static void gic_cpu_init(void) ...@@ -503,6 +519,9 @@ static void gic_cpu_init(void)
rbase = gic_data_rdist_sgi_base(); rbase = gic_data_rdist_sgi_base();
/* Configure SGIs/PPIs as non-secure Group-1 */
writel_relaxed(~0, rbase + GICR_IGROUPR0);
gic_cpu_config(rbase, gic_redist_wait_for_rwp); gic_cpu_config(rbase, gic_redist_wait_for_rwp);
/* Give LPIs a spin */ /* Give LPIs a spin */
......
...@@ -72,6 +72,9 @@ struct gic_chip_data { ...@@ -72,6 +72,9 @@ struct gic_chip_data {
struct irq_chip chip; struct irq_chip chip;
union gic_base dist_base; union gic_base dist_base;
union gic_base cpu_base; union gic_base cpu_base;
void __iomem *raw_dist_base;
void __iomem *raw_cpu_base;
u32 percpu_offset;
#ifdef CONFIG_CPU_PM #ifdef CONFIG_CPU_PM
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_active[DIV_ROUND_UP(1020, 32)]; u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
...@@ -344,6 +347,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) ...@@ -344,6 +347,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
if (static_key_true(&supports_deactivate)) if (static_key_true(&supports_deactivate))
writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE); writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/*
* Ensure any shared data written by the CPU sending
* the IPI is read after we've read the ACK register
* on the GIC.
*
* Pairs with the write barrier in gic_raise_softirq
*/
smp_rmb();
handle_IPI(irqnr, regs); handle_IPI(irqnr, regs);
#endif #endif
continue; continue;
...@@ -391,20 +402,6 @@ static struct irq_chip gic_chip = { ...@@ -391,20 +402,6 @@ static struct irq_chip gic_chip = {
IRQCHIP_MASK_ON_SUSPEND, IRQCHIP_MASK_ON_SUSPEND,
}; };
static struct irq_chip gic_eoimode1_chip = {
.name = "GICv2",
.irq_mask = gic_eoimode1_mask_irq,
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoimode1_eoi_irq,
.irq_set_type = gic_set_type,
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
.flags = IRQCHIP_SET_TYPE_MASKED |
IRQCHIP_SKIP_SET_WAKE |
IRQCHIP_MASK_ON_SUSPEND,
};
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{ {
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
...@@ -473,7 +470,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic) ...@@ -473,7 +470,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL); writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
} }
static void gic_cpu_init(struct gic_chip_data *gic) static int gic_cpu_init(struct gic_chip_data *gic)
{ {
void __iomem *dist_base = gic_data_dist_base(gic); void __iomem *dist_base = gic_data_dist_base(gic);
void __iomem *base = gic_data_cpu_base(gic); void __iomem *base = gic_data_cpu_base(gic);
...@@ -489,7 +486,9 @@ static void gic_cpu_init(struct gic_chip_data *gic) ...@@ -489,7 +486,9 @@ static void gic_cpu_init(struct gic_chip_data *gic)
/* /*
* Get what the GIC says our CPU mask is. * Get what the GIC says our CPU mask is.
*/ */
BUG_ON(cpu >= NR_GIC_CPU_IF); if (WARN_ON(cpu >= NR_GIC_CPU_IF))
return -EINVAL;
cpu_mask = gic_get_cpumask(gic); cpu_mask = gic_get_cpumask(gic);
gic_cpu_map[cpu] = cpu_mask; gic_cpu_map[cpu] = cpu_mask;
...@@ -506,6 +505,8 @@ static void gic_cpu_init(struct gic_chip_data *gic) ...@@ -506,6 +505,8 @@ static void gic_cpu_init(struct gic_chip_data *gic)
writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK); writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
gic_cpu_if_up(gic); gic_cpu_if_up(gic);
return 0;
} }
int gic_cpu_if_down(unsigned int gic_nr) int gic_cpu_if_down(unsigned int gic_nr)
...@@ -531,34 +532,35 @@ int gic_cpu_if_down(unsigned int gic_nr) ...@@ -531,34 +532,35 @@ int gic_cpu_if_down(unsigned int gic_nr)
* this function, no interrupts will be delivered by the GIC, and another * this function, no interrupts will be delivered by the GIC, and another
* platform-specific wakeup source must be enabled. * platform-specific wakeup source must be enabled.
*/ */
static void gic_dist_save(unsigned int gic_nr) static void gic_dist_save(struct gic_chip_data *gic)
{ {
unsigned int gic_irqs; unsigned int gic_irqs;
void __iomem *dist_base; void __iomem *dist_base;
int i; int i;
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); if (WARN_ON(!gic))
return;
gic_irqs = gic_data[gic_nr].gic_irqs; gic_irqs = gic->gic_irqs;
dist_base = gic_data_dist_base(&gic_data[gic_nr]); dist_base = gic_data_dist_base(gic);
if (!dist_base) if (!dist_base)
return; return;
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
gic_data[gic_nr].saved_spi_conf[i] = gic->saved_spi_conf[i] =
readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
gic_data[gic_nr].saved_spi_target[i] = gic->saved_spi_target[i] =
readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
gic_data[gic_nr].saved_spi_enable[i] = gic->saved_spi_enable[i] =
readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
gic_data[gic_nr].saved_spi_active[i] = gic->saved_spi_active[i] =
readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
} }
...@@ -569,16 +571,17 @@ static void gic_dist_save(unsigned int gic_nr) ...@@ -569,16 +571,17 @@ static void gic_dist_save(unsigned int gic_nr)
* handled normally, but any edge interrupts that occurred will not be seen by * the GIC and need to be handled by the platform-specific wakeup source. * the GIC and need to be handled by the platform-specific wakeup source.
* the GIC and need to be handled by the platform-specific wakeup source. * the GIC and need to be handled by the platform-specific wakeup source.
*/ */
static void gic_dist_restore(unsigned int gic_nr) static void gic_dist_restore(struct gic_chip_data *gic)
{ {
unsigned int gic_irqs; unsigned int gic_irqs;
unsigned int i; unsigned int i;
void __iomem *dist_base; void __iomem *dist_base;
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); if (WARN_ON(!gic))
return;
gic_irqs = gic_data[gic_nr].gic_irqs; gic_irqs = gic->gic_irqs;
dist_base = gic_data_dist_base(&gic_data[gic_nr]); dist_base = gic_data_dist_base(gic);
if (!dist_base) if (!dist_base)
return; return;
...@@ -586,7 +589,7 @@ static void gic_dist_restore(unsigned int gic_nr) ...@@ -586,7 +589,7 @@ static void gic_dist_restore(unsigned int gic_nr)
writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL); writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
writel_relaxed(gic_data[gic_nr].saved_spi_conf[i], writel_relaxed(gic->saved_spi_conf[i],
dist_base + GIC_DIST_CONFIG + i * 4); dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
...@@ -594,85 +597,87 @@ static void gic_dist_restore(unsigned int gic_nr) ...@@ -594,85 +597,87 @@ static void gic_dist_restore(unsigned int gic_nr)
dist_base + GIC_DIST_PRI + i * 4); dist_base + GIC_DIST_PRI + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
writel_relaxed(gic_data[gic_nr].saved_spi_target[i], writel_relaxed(gic->saved_spi_target[i],
dist_base + GIC_DIST_TARGET + i * 4); dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
writel_relaxed(GICD_INT_EN_CLR_X32, writel_relaxed(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], writel_relaxed(gic->saved_spi_enable[i],
dist_base + GIC_DIST_ENABLE_SET + i * 4); dist_base + GIC_DIST_ENABLE_SET + i * 4);
} }
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
writel_relaxed(GICD_INT_EN_CLR_X32, writel_relaxed(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
writel_relaxed(gic_data[gic_nr].saved_spi_active[i], writel_relaxed(gic->saved_spi_active[i],
dist_base + GIC_DIST_ACTIVE_SET + i * 4); dist_base + GIC_DIST_ACTIVE_SET + i * 4);
} }
writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
} }
static void gic_cpu_save(unsigned int gic_nr) static void gic_cpu_save(struct gic_chip_data *gic)
{ {
int i; int i;
u32 *ptr; u32 *ptr;
void __iomem *dist_base; void __iomem *dist_base;
void __iomem *cpu_base; void __iomem *cpu_base;
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); if (WARN_ON(!gic))
return;
dist_base = gic_data_dist_base(&gic_data[gic_nr]); dist_base = gic_data_dist_base(gic);
cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); cpu_base = gic_data_cpu_base(gic);
if (!dist_base || !cpu_base) if (!dist_base || !cpu_base)
return; return;
ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); ptr = raw_cpu_ptr(gic->saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++) for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); ptr = raw_cpu_ptr(gic->saved_ppi_active);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++) for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); ptr = raw_cpu_ptr(gic->saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++) for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
} }
static void gic_cpu_restore(unsigned int gic_nr) static void gic_cpu_restore(struct gic_chip_data *gic)
{ {
int i; int i;
u32 *ptr; u32 *ptr;
void __iomem *dist_base; void __iomem *dist_base;
void __iomem *cpu_base; void __iomem *cpu_base;
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); if (WARN_ON(!gic))
return;
dist_base = gic_data_dist_base(&gic_data[gic_nr]); dist_base = gic_data_dist_base(gic);
cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); cpu_base = gic_data_cpu_base(gic);
if (!dist_base || !cpu_base) if (!dist_base || !cpu_base)
return; return;
ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); ptr = raw_cpu_ptr(gic->saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
writel_relaxed(GICD_INT_EN_CLR_X32, writel_relaxed(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
} }
ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); ptr = raw_cpu_ptr(gic->saved_ppi_active);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
writel_relaxed(GICD_INT_EN_CLR_X32, writel_relaxed(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4); writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
} }
ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); ptr = raw_cpu_ptr(gic->saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++) for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
...@@ -681,7 +686,7 @@ static void gic_cpu_restore(unsigned int gic_nr) ...@@ -681,7 +686,7 @@ static void gic_cpu_restore(unsigned int gic_nr)
dist_base + GIC_DIST_PRI + i * 4); dist_base + GIC_DIST_PRI + i * 4);
writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK); writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
gic_cpu_if_up(&gic_data[gic_nr]); gic_cpu_if_up(gic);
} }
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
...@@ -696,18 +701,18 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) ...@@ -696,18 +701,18 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
#endif #endif
switch (cmd) { switch (cmd) {
case CPU_PM_ENTER: case CPU_PM_ENTER:
gic_cpu_save(i); gic_cpu_save(&gic_data[i]);
break; break;
case CPU_PM_ENTER_FAILED: case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT: case CPU_PM_EXIT:
gic_cpu_restore(i); gic_cpu_restore(&gic_data[i]);
break; break;
case CPU_CLUSTER_PM_ENTER: case CPU_CLUSTER_PM_ENTER:
gic_dist_save(i); gic_dist_save(&gic_data[i]);
break; break;
case CPU_CLUSTER_PM_ENTER_FAILED: case CPU_CLUSTER_PM_ENTER_FAILED:
case CPU_CLUSTER_PM_EXIT: case CPU_CLUSTER_PM_EXIT:
gic_dist_restore(i); gic_dist_restore(&gic_data[i]);
break; break;
} }
} }
...@@ -719,26 +724,39 @@ static struct notifier_block gic_notifier_block = { ...@@ -719,26 +724,39 @@ static struct notifier_block gic_notifier_block = {
.notifier_call = gic_notifier, .notifier_call = gic_notifier,
}; };
static void __init gic_pm_init(struct gic_chip_data *gic) static int __init gic_pm_init(struct gic_chip_data *gic)
{ {
gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
sizeof(u32)); sizeof(u32));
BUG_ON(!gic->saved_ppi_enable); if (WARN_ON(!gic->saved_ppi_enable))
return -ENOMEM;
gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
sizeof(u32)); sizeof(u32));
BUG_ON(!gic->saved_ppi_active); if (WARN_ON(!gic->saved_ppi_active))
goto free_ppi_enable;
gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
sizeof(u32)); sizeof(u32));
BUG_ON(!gic->saved_ppi_conf); if (WARN_ON(!gic->saved_ppi_conf))
goto free_ppi_active;
if (gic == &gic_data[0]) if (gic == &gic_data[0])
cpu_pm_register_notifier(&gic_notifier_block); cpu_pm_register_notifier(&gic_notifier_block);
return 0;
free_ppi_active:
free_percpu(gic->saved_ppi_active);
free_ppi_enable:
free_percpu(gic->saved_ppi_enable);
return -ENOMEM;
} }
#else #else
static void __init gic_pm_init(struct gic_chip_data *gic) static int __init gic_pm_init(struct gic_chip_data *gic)
{ {
return 0;
} }
#endif #endif
...@@ -1011,63 +1029,65 @@ static const struct irq_domain_ops gic_irq_domain_ops = { ...@@ -1011,63 +1029,65 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
.unmap = gic_irq_domain_unmap, .unmap = gic_irq_domain_unmap,
}; };
static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base, struct fwnode_handle *handle)
u32 percpu_offset, struct fwnode_handle *handle)
{ {
irq_hw_number_t hwirq_base; irq_hw_number_t hwirq_base;
struct gic_chip_data *gic; int gic_irqs, irq_base, i, ret;
int gic_irqs, irq_base, i;
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR); if (WARN_ON(!gic || gic->domain))
return -EINVAL;
gic_check_cpu_features(); gic_check_cpu_features();
gic = &gic_data[gic_nr];
/* Initialize irq_chip */ /* Initialize irq_chip */
if (static_key_true(&supports_deactivate) && gic_nr == 0) {
gic->chip = gic_eoimode1_chip;
} else {
gic->chip = gic_chip; gic->chip = gic_chip;
gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr);
if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
gic->chip.irq_mask = gic_eoimode1_mask_irq;
gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
gic->chip.name = kasprintf(GFP_KERNEL, "GICv2");
} else {
gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d",
(int)(gic - &gic_data[0]));
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (gic_nr == 0) if (gic == &gic_data[0])
gic->chip.irq_set_affinity = gic_set_affinity; gic->chip.irq_set_affinity = gic_set_affinity;
#endif #endif
#ifdef CONFIG_GIC_NON_BANKED if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
if (percpu_offset) { /* Frankein-GIC without banked registers... */ /* Frankein-GIC without banked registers... */
unsigned int cpu; unsigned int cpu;
gic->dist_base.percpu_base = alloc_percpu(void __iomem *); gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
gic->cpu_base.percpu_base = alloc_percpu(void __iomem *); gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
if (WARN_ON(!gic->dist_base.percpu_base || if (WARN_ON(!gic->dist_base.percpu_base ||
!gic->cpu_base.percpu_base)) { !gic->cpu_base.percpu_base)) {
free_percpu(gic->dist_base.percpu_base); ret = -ENOMEM;
free_percpu(gic->cpu_base.percpu_base); goto error;
return;
} }
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
u32 mpidr = cpu_logical_map(cpu); u32 mpidr = cpu_logical_map(cpu);
u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
unsigned long offset = percpu_offset * core_id; unsigned long offset = gic->percpu_offset * core_id;
*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; *per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; gic->raw_dist_base + offset;
*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
gic->raw_cpu_base + offset;
} }
gic_set_base_accessor(gic, gic_get_percpu_base); gic_set_base_accessor(gic, gic_get_percpu_base);
} else } else {
#endif /* Normal, sane GIC... */
{ /* Normal, sane GIC... */ WARN(gic->percpu_offset,
WARN(percpu_offset,
"GIC_NON_BANKED not enabled, ignoring %08x offset!", "GIC_NON_BANKED not enabled, ignoring %08x offset!",
percpu_offset); gic->percpu_offset);
gic->dist_base.common_base = dist_base; gic->dist_base.common_base = gic->raw_dist_base;
gic->cpu_base.common_base = cpu_base; gic->cpu_base.common_base = gic->raw_cpu_base;
gic_set_base_accessor(gic, gic_get_common_base); gic_set_base_accessor(gic, gic_get_common_base);
} }
...@@ -1090,7 +1110,7 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, ...@@ -1090,7 +1110,7 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
* For primary GICs, skip over SGIs. * For primary GICs, skip over SGIs.
* For secondary GICs, skip over PPIs, too. * For secondary GICs, skip over PPIs, too.
*/ */
if (gic_nr == 0 && (irq_start & 31) > 0) { if (gic == &gic_data[0] && (irq_start & 31) > 0) {
hwirq_base = 16; hwirq_base = 16;
if (irq_start != -1) if (irq_start != -1)
irq_start = (irq_start & ~31) + 16; irq_start = (irq_start & ~31) + 16;
...@@ -1112,10 +1132,12 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, ...@@ -1112,10 +1132,12 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
hwirq_base, &gic_irq_domain_ops, gic); hwirq_base, &gic_irq_domain_ops, gic);
} }
if (WARN_ON(!gic->domain)) if (WARN_ON(!gic->domain)) {
return; ret = -ENODEV;
goto error;
}
if (gic_nr == 0) { if (gic == &gic_data[0]) {
/* /*
* Initialize the CPU interface map to all CPUs. * Initialize the CPU interface map to all CPUs.
* It will be refined as each CPU probes its ID. * It will be refined as each CPU probes its ID.
...@@ -1133,19 +1155,57 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, ...@@ -1133,19 +1155,57 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
} }
gic_dist_init(gic); gic_dist_init(gic);
gic_cpu_init(gic); ret = gic_cpu_init(gic);
gic_pm_init(gic); if (ret)
goto error;
ret = gic_pm_init(gic);
if (ret)
goto error;
return 0;
error:
if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
free_percpu(gic->dist_base.percpu_base);
free_percpu(gic->cpu_base.percpu_base);
}
kfree(gic->chip.name);
return ret;
} }
void __init gic_init(unsigned int gic_nr, int irq_start, void __init gic_init(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base) void __iomem *dist_base, void __iomem *cpu_base)
{ {
struct gic_chip_data *gic;
if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
return;
/* /*
* Non-DT/ACPI systems won't run a hypervisor, so let's not * Non-DT/ACPI systems won't run a hypervisor, so let's not
* bother with these... * bother with these...
*/ */
static_key_slow_dec(&supports_deactivate); static_key_slow_dec(&supports_deactivate);
__gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL);
gic = &gic_data[gic_nr];
gic->raw_dist_base = dist_base;
gic->raw_cpu_base = cpu_base;
__gic_init_bases(gic, irq_start, NULL);
}
static void gic_teardown(struct gic_chip_data *gic)
{
if (WARN_ON(!gic))
return;
if (gic->raw_dist_base)
iounmap(gic->raw_dist_base);
if (gic->raw_cpu_base)
iounmap(gic->raw_cpu_base);
} }
#ifdef CONFIG_OF #ifdef CONFIG_OF
...@@ -1189,35 +1249,61 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base) ...@@ -1189,35 +1249,61 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
return true; return true;
} }
static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
{
if (!gic || !node)
return -EINVAL;
gic->raw_dist_base = of_iomap(node, 0);
if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
goto error;
gic->raw_cpu_base = of_iomap(node, 1);
if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
goto error;
if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
gic->percpu_offset = 0;
return 0;
error:
gic_teardown(gic);
return -ENOMEM;
}
int __init int __init
gic_of_init(struct device_node *node, struct device_node *parent) gic_of_init(struct device_node *node, struct device_node *parent)
{ {
void __iomem *cpu_base; struct gic_chip_data *gic;
void __iomem *dist_base; int irq, ret;
u32 percpu_offset;
int irq;
if (WARN_ON(!node)) if (WARN_ON(!node))
return -ENODEV; return -ENODEV;
dist_base = of_iomap(node, 0); if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
WARN(!dist_base, "unable to map gic dist registers\n"); return -EINVAL;
cpu_base = of_iomap(node, 1); gic = &gic_data[gic_cnt];
WARN(!cpu_base, "unable to map gic cpu registers\n");
ret = gic_of_setup(gic, node);
if (ret)
return ret;
/* /*
* Disable split EOI/Deactivate if either HYP is not available * Disable split EOI/Deactivate if either HYP is not available
* or the CPU interface is too small. * or the CPU interface is too small.
*/ */
if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base)) if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
static_key_slow_dec(&supports_deactivate); static_key_slow_dec(&supports_deactivate);
if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) ret = __gic_init_bases(gic, -1, &node->fwnode);
percpu_offset = 0; if (ret) {
gic_teardown(gic);
return ret;
}
__gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset,
&node->fwnode);
if (!gic_cnt) if (!gic_cnt)
gic_init_physaddr(node); gic_init_physaddr(node);
...@@ -1304,9 +1390,9 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, ...@@ -1304,9 +1390,9 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
const unsigned long end) const unsigned long end)
{ {
struct acpi_madt_generic_distributor *dist; struct acpi_madt_generic_distributor *dist;
void __iomem *cpu_base, *dist_base;
struct fwnode_handle *domain_handle; struct fwnode_handle *domain_handle;
int count; struct gic_chip_data *gic = &gic_data[0];
int count, ret;
/* Collect CPU base addresses */ /* Collect CPU base addresses */
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
...@@ -1316,17 +1402,18 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, ...@@ -1316,17 +1402,18 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
return -EINVAL; return -EINVAL;
} }
cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE); gic->raw_cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
if (!cpu_base) { if (!gic->raw_cpu_base) {
pr_err("Unable to map GICC registers\n"); pr_err("Unable to map GICC registers\n");
return -ENOMEM; return -ENOMEM;
} }
dist = (struct acpi_madt_generic_distributor *)header; dist = (struct acpi_madt_generic_distributor *)header;
dist_base = ioremap(dist->base_address, ACPI_GICV2_DIST_MEM_SIZE); gic->raw_dist_base = ioremap(dist->base_address,
if (!dist_base) { ACPI_GICV2_DIST_MEM_SIZE);
if (!gic->raw_dist_base) {
pr_err("Unable to map GICD registers\n"); pr_err("Unable to map GICD registers\n");
iounmap(cpu_base); gic_teardown(gic);
return -ENOMEM; return -ENOMEM;
} }
...@@ -1341,15 +1428,20 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, ...@@ -1341,15 +1428,20 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
/* /*
* Initialize GIC instance zero (no multi-GIC support). * Initialize GIC instance zero (no multi-GIC support).
*/ */
domain_handle = irq_domain_alloc_fwnode(dist_base); domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base);
if (!domain_handle) { if (!domain_handle) {
pr_err("Unable to allocate domain handle\n"); pr_err("Unable to allocate domain handle\n");
iounmap(cpu_base); gic_teardown(gic);
iounmap(dist_base);
return -ENOMEM; return -ENOMEM;
} }
__gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle); ret = __gic_init_bases(gic, -1, domain_handle);
if (ret) {
pr_err("Failed to initialise GIC\n");
irq_domain_free_fwnode(domain_handle);
gic_teardown(gic);
return ret;
}
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
......
/*
* Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/exception.h>
/* MIC/SIC register offsets, relative to the controller base. */
#define LPC32XX_INTC_MASK 0x00
#define LPC32XX_INTC_RAW 0x04
#define LPC32XX_INTC_STAT 0x08
#define LPC32XX_INTC_POL 0x0C
#define LPC32XX_INTC_TYPE 0x10
#define LPC32XX_INTC_FIQ 0x14

/* Each MIC/SIC instance serves 32 interrupt lines. */
#define NR_LPC32XX_IC_IRQS 32

/*
 * Per-controller state.
 * @base:   ioremapped register base of this MIC/SIC instance
 * @domain: linear irq domain covering the 32 lines
 * @chip:   irq_chip for this instance (name built from the unit address)
 */
struct lpc32xx_irq_chip {
	void __iomem *base;
	struct irq_domain *domain;
	struct irq_chip chip;
};

/* The main interrupt controller (MIC); used by the top-level irq handler. */
static struct lpc32xx_irq_chip *lpc32xx_mic_irqc;
/* Read one 32-bit MIC/SIC register. */
static inline u32 lpc32xx_ic_read(struct lpc32xx_irq_chip *ic, u32 reg)
{
	void __iomem *addr = ic->base + reg;

	return readl_relaxed(addr);
}
/* Write one 32-bit MIC/SIC register. */
static inline void lpc32xx_ic_write(struct lpc32xx_irq_chip *ic,
				    u32 reg, u32 val)
{
	void __iomem *addr = ic->base + reg;

	writel_relaxed(val, addr);
}
/* Disable an interrupt line by clearing its bit in the mask register. */
static void lpc32xx_irq_mask(struct irq_data *d)
{
	struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 enable = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK);

	enable &= ~BIT(d->hwirq);
	lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, enable);
}
/* Enable an interrupt line by setting its bit in the mask register. */
static void lpc32xx_irq_unmask(struct irq_data *d)
{
	struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 enable = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK);

	enable |= BIT(d->hwirq);
	lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, enable);
}
/* Acknowledge a (latched edge) interrupt by writing its bit to RAW. */
static void lpc32xx_irq_ack(struct irq_data *d)
{
	struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);

	lpc32xx_ic_write(ic, LPC32XX_INTC_RAW, BIT(d->hwirq));
}
/*
 * Configure the trigger mode of one interrupt line.
 *
 * The controller exposes a polarity register (active-high/rising vs.
 * active-low/falling) and a type register (edge vs. level); the requested
 * IRQ_TYPE_* value is decomposed into those two bits, and the flow handler
 * is switched to match (edge interrupts need the ack-based handler).
 *
 * Returns 0 on success, -EINVAL for unsupported trigger types.
 */
static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 val, mask = BIT(d->hwirq);
	bool high, edge;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		edge = true;
		high = true;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		edge = true;
		high = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		edge = false;
		high = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		edge = false;
		high = false;
		break;
	default:
		/* Rejecting the request is an error, so log it as one. */
		pr_err("unsupported irq type %d\n", type);
		return -EINVAL;
	}

	irqd_set_trigger_type(d, type);

	/* Polarity: bit set means active high / rising edge. */
	val = lpc32xx_ic_read(ic, LPC32XX_INTC_POL);
	if (high)
		val |= mask;
	else
		val &= ~mask;
	lpc32xx_ic_write(ic, LPC32XX_INTC_POL, val);

	/* Type: bit set means edge triggered; pick the matching handler. */
	val = lpc32xx_ic_read(ic, LPC32XX_INTC_TYPE);
	if (edge) {
		val |= mask;
		irq_set_handler_locked(d, handle_edge_irq);
	} else {
		val &= ~mask;
		irq_set_handler_locked(d, handle_level_irq);
	}
	lpc32xx_ic_write(ic, LPC32XX_INTC_TYPE, val);

	return 0;
}
/*
 * Top-level IRQ entry: dispatch every bit pending in the MIC status
 * register through the MIC irq domain, lowest bit first.
 */
static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
{
	struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc;
	u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;

	while (hwirq) {
		/* Handle one pending line at a time until none remain. */
		irq = __ffs(hwirq);
		hwirq &= ~BIT(irq);
		handle_domain_irq(lpc32xx_mic_irqc->domain, irq, regs);
	}
}
/*
 * Chained handler for the sub interrupt controllers (SIC1/SIC2): fan out
 * every pending SIC line to its mapped Linux interrupt.
 */
static void lpc32xx_sic_handler(struct irq_desc *desc)
{
	struct lpc32xx_irq_chip *ic = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 pending = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), bit;

	chained_irq_enter(chip, desc);

	while (pending) {
		bit = __ffs(pending);
		pending &= ~BIT(bit);
		generic_handle_irq(irq_find_mapping(ic->domain, bit));
	}

	chained_irq_exit(chip, desc);
}
/*
 * Domain .map callback: wire a new virq to this controller's irq_chip.
 * Defaults to level handling; lpc32xx_irq_set_type() switches the
 * handler to edge mode when requested.
 */
static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq,
				  irq_hw_number_t hw)
{
	struct lpc32xx_irq_chip *ic = id->host_data;

	irq_set_chip_data(virq, ic);
	irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq);
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_noprobe(virq);

	return 0;
}
/* Domain .unmap callback: detach the chip and handler from the virq. */
static void lpc32xx_irq_domain_unmap(struct irq_domain *id, unsigned int virq)
{
	irq_set_chip_and_handler(virq, NULL, NULL);
}
/* Two-cell DT specifiers: &lt;hwirq trigger-type&gt;. */
static const struct irq_domain_ops lpc32xx_irq_domain_ops = {
	.map    = lpc32xx_irq_domain_map,
	.unmap	= lpc32xx_irq_domain_unmap,
	.xlate  = irq_domain_xlate_twocell,
};
/*
 * Probe one MIC or SIC instance from the device tree.
 *
 * The MIC additionally becomes the CPU's top-level interrupt handler;
 * SICs are chained onto their parent interrupts instead.  All instances
 * start fully masked, active-high, level triggered.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __init lpc32xx_of_ic_init(struct device_node *node,
				     struct device_node *parent)
{
	struct lpc32xx_irq_chip *irqc;
	bool is_mic = of_device_is_compatible(node, "nxp,lpc3220-mic");
	const __be32 *reg = of_get_property(node, "reg", NULL);
	u32 parent_irq, i, addr = reg ? be32_to_cpu(*reg) : 0;

	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
	if (!irqc)
		return -ENOMEM;

	irqc->base = of_iomap(node, 0);
	if (!irqc->base) {
		pr_err("%s: unable to map registers\n", node->full_name);
		kfree(irqc);
		return -EINVAL;
	}

	irqc->chip.irq_ack = lpc32xx_irq_ack;
	irqc->chip.irq_mask = lpc32xx_irq_mask;
	irqc->chip.irq_unmask = lpc32xx_irq_unmask;
	irqc->chip.irq_set_type = lpc32xx_irq_set_type;
	/* Name the chip after its unit address, e.g. "40008000.mic". */
	if (is_mic)
		irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr);
	else
		irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.sic", addr);

	irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
					     &lpc32xx_irq_domain_ops, irqc);
	if (!irqc->domain) {
		pr_err("unable to add irq domain\n");
		iounmap(irqc->base);
		kfree(irqc->chip.name);
		kfree(irqc);
		return -ENODEV;
	}

	if (is_mic) {
		lpc32xx_mic_irqc = irqc;
		set_handle_irq(lpc32xx_handle_irq);
	} else {
		/* Hoist the invariant count out of the loop condition. */
		u32 nr_parent_irqs = of_irq_count(node);

		for (i = 0; i < nr_parent_irqs; i++) {
			parent_irq = irq_of_parse_and_map(node, i);
			if (parent_irq)
				irq_set_chained_handler_and_data(parent_irq,
						 lpc32xx_sic_handler, irqc);
		}
	}

	/* Reset: everything masked, active high, level triggered. */
	lpc32xx_ic_write(irqc, LPC32XX_INTC_MASK, 0x00);
	lpc32xx_ic_write(irqc, LPC32XX_INTC_POL,  0x00);
	lpc32xx_ic_write(irqc, LPC32XX_INTC_TYPE, 0x00);

	return 0;
}

IRQCHIP_DECLARE(nxp_lpc32xx_mic, "nxp,lpc3220-mic", lpc32xx_of_ic_init);
IRQCHIP_DECLARE(nxp_lpc32xx_sic, "nxp,lpc3220-sic", lpc32xx_of_ic_init);
/*
* Freescale SCFG MSI(-X) support
*
* Copyright (C) 2016 Freescale Semiconductor.
*
* Author: Minghuan Lian <Minghuan.Lian@nxp.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
/* One MSIR register tracks up to 32 MSI vectors. */
#define MSI_MAX_IRQS 32
/* The vector index is placed at this bit offset in the MSI payload. */
#define MSI_IBS_SHIFT 3
/* Offset of the MSIR status register inside the mapped window. */
#define MSIR 4

/*
 * Per-controller state.
 * @lock:       protects @used
 * @pdev:       owning platform device
 * @parent:     inner irq domain, one Linux irq per MSI vector
 * @msi_domain: PCI MSI domain stacked on top of @parent
 * @regs:       mapped register window (MSIR is read from here)
 * @msiir_addr: physical doorbell address programmed into endpoints
 * @irq:        parent interrupt this controller is chained onto
 * @used:       bitmap of allocated MSI vectors
 */
struct ls_scfg_msi {
	spinlock_t		lock;
	struct platform_device	*pdev;
	struct irq_domain	*parent;
	struct irq_domain	*msi_domain;
	void __iomem		*regs;
	phys_addr_t		msiir_addr;
	int			irq;
	DECLARE_BITMAP(used, MSI_MAX_IRQS);
};
/* irq_chip presented to PCI MSI users; masking is done at the PCI level. */
static struct irq_chip ls_scfg_msi_irq_chip = {
	.name = "MSI",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};
/* MSI domain configuration: default ops/chip callbacks plus MSI-X support. */
static struct msi_domain_info ls_scfg_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS |
		   MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &ls_scfg_msi_irq_chip,
};
static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
msg->address_hi = upper_32_bits(msi_data->msiir_addr);
msg->address_lo = lower_32_bits(msi_data->msiir_addr);
msg->data = data->hwirq << MSI_IBS_SHIFT;
}
/*
 * Affinity cannot be changed: all vectors funnel through one parent
 * interrupt, so always refuse the request.
 */
static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
				    const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
/* irq_chip of the inner (parent) domain; composes the actual MSI message. */
static struct irq_chip ls_scfg_msi_parent_chip = {
	.name			= "SCFG",
	.irq_compose_msi_msg	= ls_scfg_msi_compose_msg,
	.irq_set_affinity	= ls_scfg_msi_set_affinity,
};
/*
 * Inner-domain .alloc: grab the first free vector from the bitmap and
 * bind it to @virq.  Only single-vector allocations are supported.
 * Returns 0 on success, -ENOSPC when all vectors are in use.
 */
static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
					unsigned int virq,
					unsigned int nr_irqs,
					void *args)
{
	struct ls_scfg_msi *msi_data = domain->host_data;
	int hwirq;

	WARN_ON(nr_irqs != 1);

	spin_lock(&msi_data->lock);
	hwirq = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS);
	if (hwirq >= MSI_MAX_IRQS) {
		spin_unlock(&msi_data->lock);
		return -ENOSPC;
	}
	__set_bit(hwirq, msi_data->used);
	spin_unlock(&msi_data->lock);

	irq_domain_set_info(domain, virq, hwirq,
			    &ls_scfg_msi_parent_chip, msi_data,
			    handle_simple_irq, NULL, NULL);

	return 0;
}
/*
 * Inner-domain .free: return the vector behind @virq to the bitmap.
 * A hwirq outside the valid range is logged and ignored.
 */
static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
					unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
	int hwirq = d->hwirq;

	if (hwirq < 0 || hwirq >= MSI_MAX_IRQS) {
		pr_err("failed to teardown msi. Invalid hwirq %d\n", hwirq);
		return;
	}

	spin_lock(&msi_data->lock);
	__clear_bit(hwirq, msi_data->used);
	spin_unlock(&msi_data->lock);
}
/* Ops for the inner vector-allocation domain. */
static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
	.alloc	= ls_scfg_msi_domain_irq_alloc,
	.free	= ls_scfg_msi_domain_irq_free,
};
/*
 * Chained handler on the parent interrupt: read MSIR and dispatch every
 * pending vector.
 */
static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{
	struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc);
	unsigned long val;
	int pos, virq;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	/* MSIR is a big-endian register (note ioread32be). */
	val = ioread32be(msi_data->regs + MSIR);
	for_each_set_bit(pos, &val, MSI_MAX_IRQS) {
		/*
		 * Bit order in MSIR appears reversed relative to the vector
		 * index used at allocation time, hence (31 - pos) — assumed
		 * from the hardware layout; confirm against the SCFG manual.
		 */
		virq = irq_find_mapping(msi_data->parent, (31 - pos));
		if (virq)
			generic_handle_irq(virq);
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}
static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{
/* Initialize MSI domain parent */
msi_data->parent = irq_domain_add_linear(NULL,
MSI_MAX_IRQS,
&ls_scfg_msi_domain_ops,
msi_data);
if (!msi_data->parent) {
dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
msi_data->msi_domain = pci_msi_create_irq_domain(
of_node_to_fwnode(msi_data->pdev->dev.of_node),
&ls_scfg_msi_domain_info,
msi_data->parent);
if (!msi_data->msi_domain) {
dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
irq_domain_remove(msi_data->parent);
return -ENOMEM;
}
return 0;
}
/*
 * Probe: map the SCFG MSI register window, record the doorbell address,
 * build the irq domains and chain onto the parent interrupt.
 * Returns 0 on success or a negative errno.
 */
static int ls_scfg_msi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ls_scfg_msi *msi;
	struct resource *res;
	int ret;

	msi = devm_kzalloc(dev, sizeof(*msi), GFP_KERNEL);
	if (!msi)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	msi->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(msi->regs)) {
		dev_err(dev, "failed to initialize 'regs'\n");
		return PTR_ERR(msi->regs);
	}
	/* The doorbell endpoints write to is the start of this window. */
	msi->msiir_addr = res->start;

	msi->irq = platform_get_irq(pdev, 0);
	if (msi->irq <= 0) {
		dev_err(dev, "failed to get MSI irq\n");
		return -ENODEV;
	}

	msi->pdev = pdev;
	spin_lock_init(&msi->lock);

	ret = ls_scfg_msi_domains_init(msi);
	if (ret)
		return ret;

	irq_set_chained_handler_and_data(msi->irq,
					 ls_scfg_msi_irq_handler,
					 msi);

	platform_set_drvdata(pdev, msi);

	return 0;
}
/* Remove: detach from the parent interrupt and tear down both domains. */
static int ls_scfg_msi_remove(struct platform_device *pdev)
{
	struct ls_scfg_msi *msi = platform_get_drvdata(pdev);

	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);

	irq_domain_remove(msi->msi_domain);
	irq_domain_remove(msi->parent);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
static const struct of_device_id ls_scfg_msi_id[] = {
	/*
	 * The "1s..." (digit one) entries are a typo for the Layerscape
	 * LS1021A/LS1043A SoCs that shipped in the DT binding; keep them
	 * so existing device trees continue to match.
	 */
	{ .compatible = "fsl,1s1021a-msi", },
	{ .compatible = "fsl,1s1043a-msi", },
	{ .compatible = "fsl,ls1021a-msi", },
	{ .compatible = "fsl,ls1043a-msi", },
	{},
};
/* Platform driver glue; matched via the compatible strings above. */
static struct platform_driver ls_scfg_msi_driver = {
	.driver = {
		.name = "ls-scfg-msi",
		.of_match_table = ls_scfg_msi_id,
	},
	.probe = ls_scfg_msi_probe,
	.remove = ls_scfg_msi_remove,
};

module_platform_driver(ls_scfg_msi_driver);

MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
MODULE_LICENSE("GPL v2");
...@@ -263,8 +263,8 @@ static int mbigen_device_probe(struct platform_device *pdev) ...@@ -263,8 +263,8 @@ static int mbigen_device_probe(struct platform_device *pdev)
parent = platform_bus_type.dev_root; parent = platform_bus_type.dev_root;
child = of_platform_device_create(np, NULL, parent); child = of_platform_device_create(np, NULL, parent);
if (IS_ERR(child)) if (!child)
return PTR_ERR(child); return -ENOMEM;
if (of_property_read_u32(child->dev.of_node, "num-pins", if (of_property_read_u32(child->dev.of_node, "num-pins",
&num_pins) < 0) { &num_pins) < 0) {
......
...@@ -235,7 +235,7 @@ static int tegra_ictlr_domain_translate(struct irq_domain *d, ...@@ -235,7 +235,7 @@ static int tegra_ictlr_domain_translate(struct irq_domain *d,
return -EINVAL; return -EINVAL;
*hwirq = fwspec->param[1]; *hwirq = fwspec->param[1];
*type = fwspec->param[2]; *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
return 0; return 0;
} }
......
...@@ -102,8 +102,6 @@ ...@@ -102,8 +102,6 @@
#define GICR_SYNCR 0x00C0 #define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100 #define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110 #define GICR_MOVALLR 0x0110
#define GICR_ISACTIVER GICD_ISACTIVER
#define GICR_ICACTIVER GICD_ICACTIVER
#define GICR_IDREGS GICD_IDREGS #define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2 #define GICR_PIDR2 GICD_PIDR2
......
...@@ -1407,7 +1407,7 @@ int setup_irq(unsigned int irq, struct irqaction *act) ...@@ -1407,7 +1407,7 @@ int setup_irq(unsigned int irq, struct irqaction *act)
int retval; int retval;
struct irq_desc *desc = irq_to_desc(irq); struct irq_desc *desc = irq_to_desc(irq);
if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return -EINVAL; return -EINVAL;
chip_bus_lock(desc); chip_bus_lock(desc);
retval = __setup_irq(irq, desc, act); retval = __setup_irq(irq, desc, act);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment