Commit ac7473a1 authored by Linus Torvalds

Merge tag 'irq-core-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull interrupt subsystem updates from Thomas Gleixner:
 "Core:

   - Provide a new mechanism to create interrupt domains. The existing
     interfaces already have too many parameters, and expanding any of
     them for newly required functionality is a pain.

      The new function takes a pointer to a data structure as its argument.
     The data structure combines all existing parameters and allows for
     easy extension.

     The first extension for this is to handle the instantiation of
     generic interrupt chips at the core level and to allow drivers to
     provide extra init/exit callbacks.

     This is necessary to do the full interrupt chip initialization
     before the new domain is published, so that concurrent usage sites
      won't see a half-initialized interrupt domain. Similar problems
     exist on teardown.

     This has turned out to be a real problem due to the deferred and
     parallel probing which was added in recent years.

      Handling this at the core level makes it possible to remove quite a
      bit of accrued boilerplate code from existing drivers and avoids
      horrible workarounds at the driver level (a short usage sketch
      follows this message).

   - The usual small improvements all over the place

  Drivers:

   - Add support for LAN966x OIC and RZ/Five SoC

   - Split the STM32 ExtI driver into a microcontroller and an SMP
     version so that the latter can be built as a module for
     multi-platform kernels

   - Enable MSI support for Armada 370XP on platforms which do not
     support IPIs

   - The usual small fixes and enhancements all over the place"
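
A minimal sketch of the data-structure based interface described above,
assuming a generic driver context: my_create_domain() and MY_NR_IRQS are
hypothetical placeholders, while the irq_domain_info fields and
irq_domain_instantiate() itself come from the diff hunks further down
(um_pci_init() and lan966x_oic_probe() show the real conversions).

	#include <linux/err.h>
	#include <linux/irqdomain.h>

	#define MY_NR_IRQS	32	/* hypothetical number of interrupts */

	/* Sketch only: replaces an old __irq_domain_add() call */
	static struct irq_domain *my_create_domain(struct fwnode_handle *fwnode,
						   const struct irq_domain_ops *ops,
						   void *host_data)
	{
		struct irq_domain_info info = {
			.fwnode		= fwnode,
			.size		= MY_NR_IRQS,	/* linear revmap size */
			.hwirq_max	= MY_NR_IRQS,	/* top limit for hwirqs */
			.ops		= ops,
			.host_data	= host_data,
		};

		/* Unlike __irq_domain_add(), failure is ERR_PTR(), not NULL */
		return irq_domain_instantiate(&info);
	}

Drivers bound to a struct device can use devm_irq_domain_instantiate()
instead, as the LAN966x OIC probe function in this series does, so the
domain is torn down automatically when the device is unbound.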

* tag 'irq-core-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (59 commits)
  irqdomain: Fix the kernel-doc and plug it into Documentation
  genirq: Set IRQF_COND_ONESHOT in request_irq()
  irqchip/imx-irqsteer: Handle runtime power management correctly
  irqchip/gic-v3: Pass #redistributor-regions to gic_of_setup_kvm_info()
  irqchip/bcm2835: Enable SKIP_SET_WAKE and MASK_ON_SUSPEND
  irqchip/gic-v4: Make sure a VPE is locked when VMAPP is issued
  irqchip/gic-v4: Substitute vmovp_lock for a per-VM lock
  irqchip/gic-v4: Always configure affinity on VPE activation
  Revert "irqchip/dw-apb-ictl: Support building as module"
  Revert "Loongarch: Support loongarch avec"
  arm64: Kconfig: Allow build irq-stm32mp-exti driver as module
  ARM: stm32: Allow build irq-stm32mp-exti driver as module
  irqchip/stm32mp-exti: Allow building as module
  irqchip/stm32mp-exti: Rename internal symbols
  irqchip/stm32-exti: Split MCU and MPU code
  arm64: Kconfig: Select STM32MP_EXTI on STM32 platforms
  ARM: stm32: Use different EXTI driver on ARMv7m and ARMv7a
  irqchip/stm32-exti: Add CONFIG_STM32MP_EXTI
  irqchip/dw-apb-ictl: Support building as module
  irqchip/riscv-aplic: Simplify the initialization code
  ...
parents a362ade8 b7b37733
......@@ -410,6 +410,8 @@ which are used in the generic IRQ layer.
.. kernel-doc:: include/linux/interrupt.h
:internal:
.. kernel-doc:: include/linux/irqdomain.h
Public Functions Provided
=========================
......
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/interrupt-controller/microchip,lan966x-oic.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Microchip LAN966x outband interrupt controller
maintainers:
- Herve Codina <herve.codina@bootlin.com>
allOf:
- $ref: /schemas/interrupt-controller.yaml#
description: |
The Microchip LAN966x outband interrupt controller (OIC) maps the internal
interrupt sources of the LAN966x device to an external interrupt.
When the LAN966x device is used as a PCI device, the external interrupt is
routed to the PCI interrupt.
properties:
compatible:
const: microchip,lan966x-oic
'#interrupt-cells':
const: 2
interrupt-controller: true
reg:
maxItems: 1
interrupts:
maxItems: 1
required:
- compatible
- '#interrupt-cells'
- interrupt-controller
- interrupts
- reg
additionalProperties: false
examples:
- |
interrupt-controller@e00c0120 {
compatible = "microchip,lan966x-oic";
reg = <0xe00c0120 0x190>;
#interrupt-cells = <2>;
interrupt-controller;
interrupts = <0>;
interrupt-parent = <&intc>;
};
...
......@@ -21,13 +21,16 @@ description: |
properties:
compatible:
items:
- enum:
- renesas,r9a07g043u-irqc # RZ/G2UL
- renesas,r9a07g044-irqc # RZ/G2{L,LC}
- renesas,r9a07g054-irqc # RZ/V2L
- renesas,r9a08g045-irqc # RZ/G3S
- const: renesas,rzg2l-irqc
oneOf:
- items:
- enum:
- renesas,r9a07g043u-irqc # RZ/G2UL
- renesas,r9a07g044-irqc # RZ/G2{L,LC}
- renesas,r9a07g054-irqc # RZ/V2L
- renesas,r9a08g045-irqc # RZ/G3S
- const: renesas,rzg2l-irqc
- const: renesas,r9a07g043f-irqc # RZ/Five
'#interrupt-cells':
description: The first cell should contain a macro RZG2L_{NMI,IRQX} included in the
......
......@@ -14942,6 +14942,12 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/microchip/lan966x/*
MICROCHIP LAN966X OIC DRIVER
M: Herve Codina <herve.codina@bootlin.com>
S: Maintained
F: Documentation/devicetree/bindings/interrupt-controller/microchip,lan966x-oic.yaml
F: drivers/irqchip/irq-lan966x-oic.c
MICROCHIP LCDFB DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: linux-fbdev@vger.kernel.org
......
......@@ -11,7 +11,7 @@ menuconfig ARCH_STM32
select CLKSRC_STM32
select PINCTRL
select RESET_CONTROLLER
select STM32_EXTI
select STM32_EXTI if ARM_SINGLE_ARMV7M
select STM32_FIREWALL
help
Support for STMicroelectronics STM32 processors.
......
......@@ -309,7 +309,6 @@ config ARCH_STM32
select GPIOLIB
select PINCTRL
select PINCTRL_STM32MP257
select STM32_EXTI
select ARM_SMC_MBOX
select ARM_SCMI_PROTOCOL
select REGULATOR
......
......@@ -988,6 +988,11 @@ static struct resource virt_platform_resource = {
static int __init um_pci_init(void)
{
struct irq_domain_info inner_domain_info = {
.size = MAX_MSI_VECTORS,
.hwirq_max = MAX_MSI_VECTORS,
.ops = &um_pci_inner_domain_ops,
};
int err, i;
WARN_ON(logic_iomem_add_region(&virt_cfgspace_resource,
......@@ -1017,11 +1022,10 @@ static int __init um_pci_init(void)
goto free;
}
um_pci_inner_domain = __irq_domain_add(um_pci_fwnode, MAX_MSI_VECTORS,
MAX_MSI_VECTORS, 0,
&um_pci_inner_domain_ops, NULL);
if (!um_pci_inner_domain) {
err = -ENOMEM;
inner_domain_info.fwnode = um_pci_fwnode;
um_pci_inner_domain = irq_domain_instantiate(&inner_domain_info);
if (IS_ERR(um_pci_inner_domain)) {
err = PTR_ERR(um_pci_inner_domain);
goto free;
}
......@@ -1058,7 +1062,7 @@ static int __init um_pci_init(void)
goto free;
return 0;
free:
if (um_pci_inner_domain)
if (!IS_ERR_OR_NULL(um_pci_inner_domain))
irq_domain_remove(um_pci_inner_domain);
if (um_pci_fwnode)
irq_domain_free_fwnode(um_pci_fwnode);
......
......@@ -216,6 +216,21 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
return rv;
}
int __init acpi_get_madt_revision(void)
{
struct acpi_table_header *madt = NULL;
int revision;
if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, &madt)))
return -EINVAL;
revision = madt->revision;
acpi_put_table(madt);
return revision;
}
static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
......
......@@ -169,6 +169,18 @@ config IXP4XX_IRQ
select IRQ_DOMAIN
select SPARSE_IRQ
config LAN966X_OIC
tristate "Microchip LAN966x OIC Support"
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
help
Enable support for the LAN966x Outbound Interrupt Controller.
This controller is present on the Microchip LAN966x PCI device and
maps the internal interrupt sources to a PCIe interrupt.
To compile this driver as a module, choose M here: the module
will be called irq-lan966x-oic.
config MADERA_IRQ
tristate
......@@ -392,6 +404,15 @@ config LS_SCFG_MSI
config PARTITION_PERCPU
bool
config STM32MP_EXTI
tristate "STM32MP extended interrupts and event controller"
depends on (ARCH_STM32 && !ARM_SINGLE_ARMV7M) || COMPILE_TEST
default y
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_CHIP
help
Support STM32MP EXTI (extended interrupts and event) controller.
config STM32_EXTI
bool
select IRQ_DOMAIN
......
......@@ -84,6 +84,7 @@ obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o
obj-$(CONFIG_STM32MP_EXTI) += irq-stm32mp-exti.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
......@@ -104,6 +105,7 @@ obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o
obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
obj-$(CONFIG_LAN966X_OIC) += irq-lan966x-oic.o
obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
......
......@@ -13,6 +13,7 @@
* warranty of any kind, whether express or implied.
*/
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
......@@ -29,6 +30,7 @@
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <linux/types.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
......@@ -135,6 +137,7 @@
#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
/* IPI and MSI interrupt definitions for IPI platforms */
#define IPI_DOORBELL_START (0)
#define IPI_DOORBELL_END (8)
#define IPI_DOORBELL_MASK 0xFF
......@@ -143,6 +146,14 @@
#define PCI_MSI_DOORBELL_END (32)
#define PCI_MSI_DOORBELL_MASK 0xFFFF0000
/* MSI interrupt definitions for non-IPI platforms */
#define PCI_MSI_FULL_DOORBELL_START 0
#define PCI_MSI_FULL_DOORBELL_NR 32
#define PCI_MSI_FULL_DOORBELL_END 32
#define PCI_MSI_FULL_DOORBELL_MASK GENMASK(31, 0)
#define PCI_MSI_FULL_DOORBELL_SRC0_MASK GENMASK(15, 0)
#define PCI_MSI_FULL_DOORBELL_SRC1_MASK GENMASK(31, 16)
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
......@@ -151,11 +162,46 @@ static int parent_irq;
#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
static struct irq_domain *armada_370_xp_msi_inner_domain;
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
static DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
#endif
static inline bool is_ipi_available(void)
{
/*
* We distinguish IPI availability in the IC by the IC not having a
* parent irq defined. If a parent irq is defined, there is a parent
* interrupt controller (e.g. GIC) that takes care of inter-processor
* interrupts.
*/
return parent_irq <= 0;
}
static inline u32 msi_doorbell_mask(void)
{
return is_ipi_available() ? PCI_MSI_DOORBELL_MASK :
PCI_MSI_FULL_DOORBELL_MASK;
}
static inline unsigned int msi_doorbell_start(void)
{
return is_ipi_available() ? PCI_MSI_DOORBELL_START :
PCI_MSI_FULL_DOORBELL_START;
}
static inline unsigned int msi_doorbell_size(void)
{
return is_ipi_available() ? PCI_MSI_DOORBELL_NR :
PCI_MSI_FULL_DOORBELL_NR;
}
static inline unsigned int msi_doorbell_end(void)
{
return is_ipi_available() ? PCI_MSI_DOORBELL_END :
PCI_MSI_FULL_DOORBELL_END;
}
static inline bool is_percpu_irq(irq_hw_number_t irq)
{
if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
......@@ -213,7 +259,7 @@ static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg
msg->address_lo = lower_32_bits(msi_doorbell_addr);
msg->address_hi = upper_32_bits(msi_doorbell_addr);
msg->data = BIT(cpu + 8) | (data->hwirq + PCI_MSI_DOORBELL_START);
msg->data = BIT(cpu + 8) | (data->hwirq + msi_doorbell_start());
}
static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
......@@ -246,7 +292,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
int hwirq, i;
mutex_lock(&msi_used_lock);
hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
hwirq = bitmap_find_free_region(msi_used, msi_doorbell_size(),
order_base_2(nr_irqs));
mutex_unlock(&msi_used_lock);
......@@ -283,9 +329,10 @@ static void armada_370_xp_msi_reenable_percpu(void)
u32 reg;
/* Enable MSI doorbell mask and combined cpu local interrupt */
reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
| PCI_MSI_DOORBELL_MASK;
reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
reg |= msi_doorbell_mask();
writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
/* Unmask local doorbell interrupt */
writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
......@@ -297,7 +344,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
ARMADA_370_XP_SW_TRIG_INT_OFFS;
armada_370_xp_msi_inner_domain =
irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
irq_domain_add_linear(NULL, msi_doorbell_size(),
&armada_370_xp_msi_domain_ops, NULL);
if (!armada_370_xp_msi_inner_domain)
return -ENOMEM;
......@@ -313,6 +360,10 @@ static int armada_370_xp_msi_init(struct device_node *node,
armada_370_xp_msi_reenable_percpu();
/* Unmask low 16 MSI irqs on non-IPI platforms */
if (!is_ipi_available())
writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
return 0;
}
#else
......@@ -461,24 +512,18 @@ static __init void armada_xp_ipi_init(struct device_node *node)
set_smp_ipi_range(base_ipi, IPI_DOORBELL_END);
}
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
static int armada_xp_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long reg, mask;
int cpu;
/* Select a single core from the affinity mask which is online */
cpu = cpumask_any_and(mask_val, cpu_online_mask);
mask = 1UL << cpu_logical_map(cpu);
raw_spin_lock(&irq_controller_lock);
reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
raw_spin_unlock(&irq_controller_lock);
atomic_io_modify(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq),
ARMADA_370_XP_INT_SOURCE_CPU_MASK,
BIT(cpu_logical_map(cpu)));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
......@@ -496,6 +541,9 @@ static void armada_xp_mpic_smp_cpu_init(void)
for (i = 0; i < nr_irqs; i++)
writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
if (!is_ipi_available())
return;
/* Disable all IPIs */
writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
......@@ -527,7 +575,8 @@ static void armada_xp_mpic_reenable_percpu(void)
armada_370_xp_irq_unmask(data);
}
ipi_resume();
if (is_ipi_available())
ipi_resume();
armada_370_xp_msi_reenable_percpu();
}
......@@ -566,6 +615,10 @@ static struct irq_chip armada_370_xp_irq_chip = {
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
unsigned int virq, irq_hw_number_t hw)
{
/* IRQs 0 and 1 cannot be mapped, they are handled internally */
if (hw <= 1)
return -EINVAL;
armada_370_xp_irq_mask(irq_get_irq_data(virq));
if (!is_percpu_irq(hw))
writel(hw, per_cpu_int_base +
......@@ -599,20 +652,20 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
u32 msimask, msinr;
msimask = readl_relaxed(per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& PCI_MSI_DOORBELL_MASK;
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
msimask &= msi_doorbell_mask();
writel(~msimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
for (msinr = PCI_MSI_DOORBELL_START;
msinr < PCI_MSI_DOORBELL_END; msinr++) {
for (msinr = msi_doorbell_start();
msinr < msi_doorbell_end(); msinr++) {
unsigned int irq;
if (!(msimask & BIT(msinr)))
continue;
irq = msinr - PCI_MSI_DOORBELL_START;
irq = msinr - msi_doorbell_start();
generic_handle_domain_irq(armada_370_xp_msi_inner_domain, irq);
}
......@@ -641,7 +694,7 @@ static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
continue;
if (irqn == 1) {
if (irqn == 0 || irqn == 1) {
armada_370_xp_handle_msi_irq(NULL, true);
continue;
}
......@@ -702,6 +755,7 @@ static int armada_370_xp_mpic_suspend(void)
static void armada_370_xp_mpic_resume(void)
{
bool src0, src1;
int nirqs;
irq_hw_number_t irq;
......@@ -741,12 +795,22 @@ static void armada_370_xp_mpic_resume(void)
/* Reconfigure doorbells for IPIs and MSIs */
writel(doorbell_mask_reg,
per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
if (doorbell_mask_reg & IPI_DOORBELL_MASK)
if (is_ipi_available()) {
src0 = doorbell_mask_reg & IPI_DOORBELL_MASK;
src1 = doorbell_mask_reg & PCI_MSI_DOORBELL_MASK;
} else {
src0 = doorbell_mask_reg & PCI_MSI_FULL_DOORBELL_SRC0_MASK;
src1 = doorbell_mask_reg & PCI_MSI_FULL_DOORBELL_SRC1_MASK;
}
if (src0)
writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
if (src1)
writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
ipi_resume();
if (is_ipi_available())
ipi_resume();
}
static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
......@@ -791,13 +855,18 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
BUG_ON(!armada_370_xp_mpic_domain);
irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
/*
* Initialize parent_irq before calling any other functions, since it is
* used to distinguish between IPI and non-IPI platforms.
*/
parent_irq = irq_of_parse_and_map(node, 0);
/* Setup for the boot CPU */
armada_xp_mpic_perf_init();
armada_xp_mpic_smp_cpu_init();
armada_370_xp_msi_init(node, main_int_res.start);
parent_irq = irq_of_parse_and_map(node, 0);
if (parent_irq <= 0) {
irq_set_default_host(armada_370_xp_mpic_domain);
set_handle_irq(armada_370_xp_handle_irq);
......
......@@ -102,7 +102,9 @@ static void armctrl_unmask_irq(struct irq_data *d)
static struct irq_chip armctrl_chip = {
.name = "ARMCTRL-level",
.irq_mask = armctrl_mask_irq,
.irq_unmask = armctrl_unmask_irq
.irq_unmask = armctrl_unmask_irq,
.flags = IRQCHIP_MASK_ON_SUSPEND |
IRQCHIP_SKIP_SET_WAKE,
};
static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr,
......
......@@ -1317,7 +1317,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
{
struct its_cmd_desc desc = {};
struct its_node *its;
unsigned long flags;
int col_id = vpe->col_idx;
desc.its_vmovp_cmd.vpe = vpe;
......@@ -1329,6 +1328,12 @@ static void its_send_vmovp(struct its_vpe *vpe)
return;
}
/*
* Protect against concurrent updates of the mapping state on
* individual VMs.
*/
guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
/*
* Yet another marvel of the architecture. If using the
* its_list "feature", we need to make sure that all ITSs
......@@ -1337,8 +1342,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
*
* Wall <-- Head.
*/
raw_spin_lock_irqsave(&vmovp_lock, flags);
guard(raw_spinlock)(&vmovp_lock);
desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
......@@ -1353,8 +1357,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
desc.its_vmovp_cmd.col = &its->collections[col_id];
its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
}
raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
......@@ -1791,12 +1793,10 @@ static bool gic_requires_eager_mapping(void)
static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
unsigned long flags;
if (gic_requires_eager_mapping())
return;
raw_spin_lock_irqsave(&vmovp_lock, flags);
guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
/*
* If the VM wasn't mapped yet, iterate over the vpes and get
......@@ -1809,37 +1809,31 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
for (i = 0; i < vm->nr_vpes; i++) {
struct its_vpe *vpe = vm->vpes[i];
struct irq_data *d = irq_get_irq_data(vpe->irq);
/* Map the VPE to the first possible CPU */
vpe->col_idx = cpumask_first(cpu_online_mask);
its_send_vmapp(its, vpe, true);
scoped_guard(raw_spinlock, &vpe->vpe_lock)
its_send_vmapp(its, vpe, true);
its_send_vinvall(its, vpe);
irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
}
}
raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
unsigned long flags;
/* Not using the ITS list? Everything is always mapped. */
if (gic_requires_eager_mapping())
return;
raw_spin_lock_irqsave(&vmovp_lock, flags);
guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
if (!--vm->vlpi_count[its->list_nr]) {
int i;
for (i = 0; i < vm->nr_vpes; i++)
for (i = 0; i < vm->nr_vpes; i++) {
guard(raw_spinlock)(&vm->vpes[i]->vpe_lock);
its_send_vmapp(its, vm->vpes[i], false);
}
}
raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
......@@ -3926,6 +3920,8 @@ static void its_vpe_invall(struct its_vpe *vpe)
{
struct its_node *its;
guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
list_for_each_entry(its, &its_nodes, entry) {
if (!is_v4(its))
continue;
......@@ -4531,6 +4527,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
vm->db_lpi_base = base;
vm->nr_db_lpis = nr_ids;
vm->vprop_page = vprop_page;
raw_spin_lock_init(&vm->vmapp_lock);
if (gic_rdists->has_rvpeid)
irqchip = &its_vpe_4_1_irq_chip;
......@@ -4562,6 +4559,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_node *its;
/* Map the VPE to the first possible CPU */
vpe->col_idx = cpumask_first(cpu_online_mask);
irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
/*
* If we use the list map, we issue VMAPP on demand... Unless
* we're on a GICv4.1 and we eagerly map the VPE on all ITSs
......@@ -4570,9 +4571,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
if (!gic_requires_eager_mapping())
return 0;
/* Map the VPE to the first possible CPU */
vpe->col_idx = cpumask_first(cpu_online_mask);
list_for_each_entry(its, &its_nodes, entry) {
if (!is_v4(its))
continue;
......@@ -4581,8 +4579,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
its_send_vinvall(its, vpe);
}
irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
return 0;
}
......@@ -5580,6 +5576,10 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
goto node_err;
}
if (acpi_get_madt_revision() >= 7 &&
(its_entry->flags & ACPI_MADT_ITS_NON_COHERENT))
its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
err = its_probe_one(its);
if (!err)
return 0;
......
......@@ -2203,11 +2203,10 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
of_node_put(parts_node);
}
static void __init gic_of_setup_kvm_info(struct device_node *node)
static void __init gic_of_setup_kvm_info(struct device_node *node, u32 nr_redist_regions)
{
int ret;
struct resource r;
u32 gicv_idx;
gic_v3_kvm_info.type = GIC_V3;
......@@ -2215,12 +2214,8 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
if (!gic_v3_kvm_info.maint_irq)
return;
if (of_property_read_u32(node, "#redistributor-regions",
&gicv_idx))
gicv_idx = 1;
gicv_idx += 3; /* Also skip GICD, GICC, GICH */
ret = of_address_to_resource(node, gicv_idx, &r);
/* Also skip GICD, GICC, GICH */
ret = of_address_to_resource(node, nr_redist_regions + 3, &r);
if (!ret)
gic_v3_kvm_info.vcpu = r;
......@@ -2310,7 +2305,7 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
gic_populate_ppi_partitions(node);
if (static_branch_likely(&supports_deactivate_key))
gic_of_setup_kvm_info(node);
gic_of_setup_kvm_info(node, nr_redist_regions);
return 0;
out_unmap_rdist:
......@@ -2362,6 +2357,11 @@ gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
return -ENOMEM;
}
if (acpi_get_madt_revision() >= 7 &&
(redist->flags & ACPI_MADT_GICR_NON_COHERENT))
gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
gic_request_region(redist->base_address, redist->length, "GICR");
gic_acpi_register_redist(redist->base_address, redist_base);
......@@ -2402,6 +2402,10 @@ gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
return -ENOMEM;
gic_request_region(gicc->gicr_base_address, size, "GICR");
if (acpi_get_madt_revision() >= 7 &&
(gicc->flags & ACPI_MADT_GICC_NON_COHERENT))
gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
return 0;
}
......
......@@ -36,6 +36,7 @@ struct irqsteer_data {
int channel;
struct irq_domain *domain;
u32 *saved_reg;
struct device *dev;
};
static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
......@@ -72,10 +73,26 @@ static void imx_irqsteer_irq_mask(struct irq_data *d)
raw_spin_unlock_irqrestore(&data->lock, flags);
}
static void imx_irqsteer_irq_bus_lock(struct irq_data *d)
{
struct irqsteer_data *data = d->chip_data;
pm_runtime_get_sync(data->dev);
}
static void imx_irqsteer_irq_bus_sync_unlock(struct irq_data *d)
{
struct irqsteer_data *data = d->chip_data;
pm_runtime_put_autosuspend(data->dev);
}
static const struct irq_chip imx_irqsteer_irq_chip = {
.name = "irqsteer",
.irq_mask = imx_irqsteer_irq_mask,
.irq_unmask = imx_irqsteer_irq_unmask,
.name = "irqsteer",
.irq_mask = imx_irqsteer_irq_mask,
.irq_unmask = imx_irqsteer_irq_unmask,
.irq_bus_lock = imx_irqsteer_irq_bus_lock,
.irq_bus_sync_unlock = imx_irqsteer_irq_bus_sync_unlock,
};
static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
......@@ -150,6 +167,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
data->dev = &pdev->dev;
data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs)) {
dev_err(&pdev->dev, "failed to initialize reg\n");
......
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Microchip LAN966x outbound interrupt controller
*
* Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
*
* Authors:
* Horatiu Vultur <horatiu.vultur@microchip.com>
* Clément Léger <clement.leger@bootlin.com>
* Herve Codina <herve.codina@bootlin.com>
*/
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip.h>
#include <linux/irq.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
struct lan966x_oic_chip_regs {
int reg_off_ena_set;
int reg_off_ena_clr;
int reg_off_sticky;
int reg_off_ident;
int reg_off_map;
};
struct lan966x_oic_data {
void __iomem *regs;
int irq;
};
#define LAN966X_OIC_NR_IRQ 86
/* Interrupt sticky status */
#define LAN966X_OIC_INTR_STICKY 0x30
#define LAN966X_OIC_INTR_STICKY1 0x34
#define LAN966X_OIC_INTR_STICKY2 0x38
/* Interrupt enable */
#define LAN966X_OIC_INTR_ENA 0x48
#define LAN966X_OIC_INTR_ENA1 0x4c
#define LAN966X_OIC_INTR_ENA2 0x50
/* Atomic clear of interrupt enable */
#define LAN966X_OIC_INTR_ENA_CLR 0x54
#define LAN966X_OIC_INTR_ENA_CLR1 0x58
#define LAN966X_OIC_INTR_ENA_CLR2 0x5c
/* Atomic set of interrupt */
#define LAN966X_OIC_INTR_ENA_SET 0x60
#define LAN966X_OIC_INTR_ENA_SET1 0x64
#define LAN966X_OIC_INTR_ENA_SET2 0x68
/* Mapping of source to destination interrupts (_n = 0..8) */
#define LAN966X_OIC_DST_INTR_MAP(_n) (0x78 + (_n) * 4)
#define LAN966X_OIC_DST_INTR_MAP1(_n) (0x9c + (_n) * 4)
#define LAN966X_OIC_DST_INTR_MAP2(_n) (0xc0 + (_n) * 4)
/* Currently active interrupt sources per destination (_n = 0..8) */
#define LAN966X_OIC_DST_INTR_IDENT(_n) (0xe4 + (_n) * 4)
#define LAN966X_OIC_DST_INTR_IDENT1(_n) (0x108 + (_n) * 4)
#define LAN966X_OIC_DST_INTR_IDENT2(_n) (0x12c + (_n) * 4)
static unsigned int lan966x_oic_irq_startup(struct irq_data *data)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct irq_chip_type *ct = irq_data_get_chip_type(data);
struct lan966x_oic_chip_regs *chip_regs = gc->private;
u32 map;
irq_gc_lock(gc);
/* Map the source interrupt to the destination */
map = irq_reg_readl(gc, chip_regs->reg_off_map);
map |= data->mask;
irq_reg_writel(gc, map, chip_regs->reg_off_map);
irq_gc_unlock(gc);
ct->chip.irq_ack(data);
ct->chip.irq_unmask(data);
return 0;
}
static void lan966x_oic_irq_shutdown(struct irq_data *data)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct irq_chip_type *ct = irq_data_get_chip_type(data);
struct lan966x_oic_chip_regs *chip_regs = gc->private;
u32 map;
ct->chip.irq_mask(data);
irq_gc_lock(gc);
/* Unmap the interrupt */
map = irq_reg_readl(gc, chip_regs->reg_off_map);
map &= ~data->mask;
irq_reg_writel(gc, map, chip_regs->reg_off_map);
irq_gc_unlock(gc);
}
static int lan966x_oic_irq_set_type(struct irq_data *data,
unsigned int flow_type)
{
if (flow_type != IRQ_TYPE_LEVEL_HIGH) {
pr_err("lan966x oic doesn't support flow type %d\n", flow_type);
return -EINVAL;
}
return 0;
}
static void lan966x_oic_irq_handler_domain(struct irq_domain *d, u32 first_irq)
{
struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, first_irq);
struct lan966x_oic_chip_regs *chip_regs = gc->private;
unsigned long ident;
unsigned int hwirq;
ident = irq_reg_readl(gc, chip_regs->reg_off_ident);
if (!ident)
return;
for_each_set_bit(hwirq, &ident, 32)
generic_handle_domain_irq(d, hwirq + first_irq);
}
static void lan966x_oic_irq_handler(struct irq_desc *desc)
{
struct irq_domain *d = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
chained_irq_enter(chip, desc);
lan966x_oic_irq_handler_domain(d, 0);
lan966x_oic_irq_handler_domain(d, 32);
lan966x_oic_irq_handler_domain(d, 64);
chained_irq_exit(chip, desc);
}
static struct lan966x_oic_chip_regs lan966x_oic_chip_regs[3] = {
{
.reg_off_ena_set = LAN966X_OIC_INTR_ENA_SET,
.reg_off_ena_clr = LAN966X_OIC_INTR_ENA_CLR,
.reg_off_sticky = LAN966X_OIC_INTR_STICKY,
.reg_off_ident = LAN966X_OIC_DST_INTR_IDENT(0),
.reg_off_map = LAN966X_OIC_DST_INTR_MAP(0),
}, {
.reg_off_ena_set = LAN966X_OIC_INTR_ENA_SET1,
.reg_off_ena_clr = LAN966X_OIC_INTR_ENA_CLR1,
.reg_off_sticky = LAN966X_OIC_INTR_STICKY1,
.reg_off_ident = LAN966X_OIC_DST_INTR_IDENT1(0),
.reg_off_map = LAN966X_OIC_DST_INTR_MAP1(0),
}, {
.reg_off_ena_set = LAN966X_OIC_INTR_ENA_SET2,
.reg_off_ena_clr = LAN966X_OIC_INTR_ENA_CLR2,
.reg_off_sticky = LAN966X_OIC_INTR_STICKY2,
.reg_off_ident = LAN966X_OIC_DST_INTR_IDENT2(0),
.reg_off_map = LAN966X_OIC_DST_INTR_MAP2(0),
}
};
static int lan966x_oic_chip_init(struct irq_chip_generic *gc)
{
struct lan966x_oic_data *lan966x_oic = gc->domain->host_data;
struct lan966x_oic_chip_regs *chip_regs;
chip_regs = &lan966x_oic_chip_regs[gc->irq_base / 32];
gc->reg_base = lan966x_oic->regs;
gc->chip_types[0].regs.enable = chip_regs->reg_off_ena_set;
gc->chip_types[0].regs.disable = chip_regs->reg_off_ena_clr;
gc->chip_types[0].regs.ack = chip_regs->reg_off_sticky;
gc->chip_types[0].chip.irq_startup = lan966x_oic_irq_startup;
gc->chip_types[0].chip.irq_shutdown = lan966x_oic_irq_shutdown;
gc->chip_types[0].chip.irq_set_type = lan966x_oic_irq_set_type;
gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
gc->private = chip_regs;
/* Disable all interrupts handled by this chip */
irq_reg_writel(gc, ~0U, chip_regs->reg_off_ena_clr);
return 0;
}
static void lan966x_oic_chip_exit(struct irq_chip_generic *gc)
{
/* Disable and ack all interrupts handled by this chip */
irq_reg_writel(gc, ~0U, gc->chip_types[0].regs.disable);
irq_reg_writel(gc, ~0U, gc->chip_types[0].regs.ack);
}
static int lan966x_oic_domain_init(struct irq_domain *d)
{
struct lan966x_oic_data *lan966x_oic = d->host_data;
irq_set_chained_handler_and_data(lan966x_oic->irq, lan966x_oic_irq_handler, d);
return 0;
}
static void lan966x_oic_domain_exit(struct irq_domain *d)
{
struct lan966x_oic_data *lan966x_oic = d->host_data;
irq_set_chained_handler_and_data(lan966x_oic->irq, NULL, NULL);
}
static int lan966x_oic_probe(struct platform_device *pdev)
{
struct irq_domain_chip_generic_info dgc_info = {
.name = "lan966x-oic",
.handler = handle_level_irq,
.irqs_per_chip = 32,
.num_ct = 1,
.init = lan966x_oic_chip_init,
.exit = lan966x_oic_chip_exit,
};
struct irq_domain_info d_info = {
.fwnode = of_node_to_fwnode(pdev->dev.of_node),
.domain_flags = IRQ_DOMAIN_FLAG_DESTROY_GC,
.size = LAN966X_OIC_NR_IRQ,
.hwirq_max = LAN966X_OIC_NR_IRQ,
.ops = &irq_generic_chip_ops,
.dgc_info = &dgc_info,
.init = lan966x_oic_domain_init,
.exit = lan966x_oic_domain_exit,
};
struct lan966x_oic_data *lan966x_oic;
struct device *dev = &pdev->dev;
struct irq_domain *domain;
lan966x_oic = devm_kmalloc(dev, sizeof(*lan966x_oic), GFP_KERNEL);
if (!lan966x_oic)
return -ENOMEM;
lan966x_oic->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lan966x_oic->regs))
return dev_err_probe(dev, PTR_ERR(lan966x_oic->regs),
"failed to map resource\n");
lan966x_oic->irq = platform_get_irq(pdev, 0);
if (lan966x_oic->irq < 0)
return dev_err_probe(dev, lan966x_oic->irq, "failed to get the IRQ\n");
d_info.host_data = lan966x_oic;
domain = devm_irq_domain_instantiate(dev, &d_info);
if (IS_ERR(domain))
return dev_err_probe(dev, PTR_ERR(domain),
"failed to instantiate the IRQ domain\n");
return 0;
}
static const struct of_device_id lan966x_oic_of_match[] = {
{ .compatible = "microchip,lan966x-oic" },
{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, lan966x_oic_of_match);
static struct platform_driver lan966x_oic_driver = {
.probe = lan966x_oic_probe,
.driver = {
.name = "lan966x-oic",
.of_match_table = lan966x_oic_of_match,
},
};
module_platform_driver(lan966x_oic_driver);
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("Microchip LAN966x OIC driver");
MODULE_LICENSE("GPL");
......@@ -608,5 +608,6 @@ IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_of_init)
IRQCHIP_PLATFORM_DRIVER_END(meson_gpio_intc)
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_DESCRIPTION("Meson GPIO Interrupt Multiplexer driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:meson-gpio-intc");
......@@ -193,6 +193,7 @@ module_platform_driver(mvebu_pic_driver);
MODULE_AUTHOR("Yehuda Yitschak <yehuday@marvell.com>");
MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_DESCRIPTION("Marvell Armada 7K/8K PIC driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mvebu_pic");
......@@ -37,6 +37,8 @@
#define TSSEL_SHIFT(n) (8 * (n))
#define TSSEL_MASK GENMASK(7, 0)
#define IRQ_MASK 0x3
#define IMSK 0x10010
#define TMSK 0x10020
#define TSSR_OFFSET(n) ((n) % 4)
#define TSSR_INDEX(n) ((n) / 4)
......@@ -69,12 +71,14 @@ struct rzg2l_irqc_reg_cache {
/**
* struct rzg2l_irqc_priv - IRQ controller private data structure
* @base: Controller's base address
* @irqchip: Pointer to struct irq_chip
* @fwspec: IRQ firmware specific data
* @lock: Lock to serialize access to hardware registers
* @cache: Registers cache for suspend/resume
*/
static struct rzg2l_irqc_priv {
void __iomem *base;
const struct irq_chip *irqchip;
struct irq_fwspec fwspec[IRQC_NUM_IRQ];
raw_spinlock_t lock;
struct rzg2l_irqc_reg_cache cache;
......@@ -138,6 +142,111 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
irq_chip_eoi_parent(d);
}
static void rzfive_irqc_mask_irq_interrupt(struct rzg2l_irqc_priv *priv,
unsigned int hwirq)
{
u32 bit = BIT(hwirq - IRQC_IRQ_START);
writel_relaxed(readl_relaxed(priv->base + IMSK) | bit, priv->base + IMSK);
}
static void rzfive_irqc_unmask_irq_interrupt(struct rzg2l_irqc_priv *priv,
unsigned int hwirq)
{
u32 bit = BIT(hwirq - IRQC_IRQ_START);
writel_relaxed(readl_relaxed(priv->base + IMSK) & ~bit, priv->base + IMSK);
}
static void rzfive_irqc_mask_tint_interrupt(struct rzg2l_irqc_priv *priv,
unsigned int hwirq)
{
u32 bit = BIT(hwirq - IRQC_TINT_START);
writel_relaxed(readl_relaxed(priv->base + TMSK) | bit, priv->base + TMSK);
}
static void rzfive_irqc_unmask_tint_interrupt(struct rzg2l_irqc_priv *priv,
unsigned int hwirq)
{
u32 bit = BIT(hwirq - IRQC_TINT_START);
writel_relaxed(readl_relaxed(priv->base + TMSK) & ~bit, priv->base + TMSK);
}
static void rzfive_irqc_mask(struct irq_data *d)
{
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
unsigned int hwirq = irqd_to_hwirq(d);
raw_spin_lock(&priv->lock);
if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
rzfive_irqc_mask_irq_interrupt(priv, hwirq);
else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
rzfive_irqc_mask_tint_interrupt(priv, hwirq);
raw_spin_unlock(&priv->lock);
irq_chip_mask_parent(d);
}
static void rzfive_irqc_unmask(struct irq_data *d)
{
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
unsigned int hwirq = irqd_to_hwirq(d);
raw_spin_lock(&priv->lock);
if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
rzfive_irqc_unmask_irq_interrupt(priv, hwirq);
else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
rzfive_irqc_unmask_tint_interrupt(priv, hwirq);
raw_spin_unlock(&priv->lock);
irq_chip_unmask_parent(d);
}
static void rzfive_tint_irq_endisable(struct irq_data *d, bool enable)
{
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
unsigned int hwirq = irqd_to_hwirq(d);
if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ) {
u32 offset = hwirq - IRQC_TINT_START;
u32 tssr_offset = TSSR_OFFSET(offset);
u8 tssr_index = TSSR_INDEX(offset);
u32 reg;
raw_spin_lock(&priv->lock);
if (enable)
rzfive_irqc_unmask_tint_interrupt(priv, hwirq);
else
rzfive_irqc_mask_tint_interrupt(priv, hwirq);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
if (enable)
reg |= TIEN << TSSEL_SHIFT(tssr_offset);
else
reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
} else {
raw_spin_lock(&priv->lock);
if (enable)
rzfive_irqc_unmask_irq_interrupt(priv, hwirq);
else
rzfive_irqc_mask_irq_interrupt(priv, hwirq);
raw_spin_unlock(&priv->lock);
}
}
static void rzfive_irqc_irq_disable(struct irq_data *d)
{
irq_chip_disable_parent(d);
rzfive_tint_irq_endisable(d, false);
}
static void rzfive_irqc_irq_enable(struct irq_data *d)
{
rzfive_tint_irq_endisable(d, true);
irq_chip_enable_parent(d);
}
static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable)
{
unsigned int hw_irq = irqd_to_hwirq(d);
......@@ -162,8 +271,8 @@ static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable)
static void rzg2l_irqc_irq_disable(struct irq_data *d)
{
rzg2l_tint_irq_endisable(d, false);
irq_chip_disable_parent(d);
rzg2l_tint_irq_endisable(d, false);
}
static void rzg2l_irqc_irq_enable(struct irq_data *d)
......@@ -321,7 +430,7 @@ static struct syscore_ops rzg2l_irqc_syscore_ops = {
.resume = rzg2l_irqc_irq_resume,
};
static const struct irq_chip irqc_chip = {
static const struct irq_chip rzg2l_irqc_chip = {
.name = "rzg2l-irqc",
.irq_eoi = rzg2l_irqc_eoi,
.irq_mask = irq_chip_mask_parent,
......@@ -338,6 +447,23 @@ static const struct irq_chip irqc_chip = {
IRQCHIP_SKIP_SET_WAKE,
};
static const struct irq_chip rzfive_irqc_chip = {
.name = "rzfive-irqc",
.irq_eoi = rzg2l_irqc_eoi,
.irq_mask = rzfive_irqc_mask,
.irq_unmask = rzfive_irqc_unmask,
.irq_disable = rzfive_irqc_irq_disable,
.irq_enable = rzfive_irqc_irq_enable,
.irq_get_irqchip_state = irq_chip_get_parent_state,
.irq_set_irqchip_state = irq_chip_set_parent_state,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = rzg2l_irqc_set_type,
.irq_set_affinity = irq_chip_set_affinity_parent,
.flags = IRQCHIP_MASK_ON_SUSPEND |
IRQCHIP_SET_TYPE_MASKED |
IRQCHIP_SKIP_SET_WAKE,
};
static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
......@@ -369,7 +495,7 @@ static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
if (hwirq > (IRQC_NUM_IRQ - 1))
return -EINVAL;
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &irqc_chip,
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
(void *)(uintptr_t)tint);
if (ret)
return ret;
......@@ -401,7 +527,8 @@ static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
return 0;
}
static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *parent,
const struct irq_chip *irq_chip)
{
struct irq_domain *irq_domain, *parent_domain;
struct platform_device *pdev;
......@@ -422,6 +549,8 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
if (!rzg2l_irqc_data)
return -ENOMEM;
rzg2l_irqc_data->irqchip = irq_chip;
rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
if (IS_ERR(rzg2l_irqc_data->base))
return PTR_ERR(rzg2l_irqc_data->base);
......@@ -472,8 +601,21 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
return ret;
}
static int __init rzg2l_irqc_init(struct device_node *node,
struct device_node *parent)
{
return rzg2l_irqc_common_init(node, parent, &rzg2l_irqc_chip);
}
static int __init rzfive_irqc_init(struct device_node *node,
struct device_node *parent)
{
return rzg2l_irqc_common_init(node, parent, &rzfive_irqc_chip);
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
IRQCHIP_MATCH("renesas,r9a07g043f-irqc", rzfive_irqc_init)
IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");
......@@ -127,6 +127,7 @@ static void aplic_init_hw_irqs(struct aplic_priv *priv)
int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs)
{
struct device_node *np = to_of_node(dev->fwnode);
struct of_phandle_args parent;
int rc;
......@@ -134,7 +135,7 @@ int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *
* Currently, only OF fwnode is supported so extend this
* function for ACPI support.
*/
if (!is_of_node(dev->fwnode))
if (!np)
return -EINVAL;
/* Save device pointer and register base */
......@@ -142,8 +143,7 @@ int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *
priv->regs = regs;
/* Find out number of interrupt sources */
rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,num-sources",
&priv->nr_irqs);
rc = of_property_read_u32(np, "riscv,num-sources", &priv->nr_irqs);
if (rc) {
dev_err(dev, "failed to get number of interrupt sources\n");
return rc;
......@@ -155,8 +155,8 @@ int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *
* If "msi-parent" property is present then we ignore the
* APLIC IDCs which forces the APLIC driver to use MSI mode.
*/
if (!of_property_present(to_of_node(dev->fwnode), "msi-parent")) {
while (!of_irq_parse_one(to_of_node(dev->fwnode), priv->nr_idcs, &parent))
if (!of_property_present(np, "msi-parent")) {
while (!of_irq_parse_one(np, priv->nr_idcs, &parent))
priv->nr_idcs++;
}
......@@ -184,8 +184,7 @@ static int aplic_probe(struct platform_device *pdev)
* If msi-parent property is present then setup APLIC MSI
* mode otherwise setup APLIC direct mode.
*/
if (is_of_node(dev->fwnode))
msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent");
msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent");
if (msi_mode)
rc = aplic_msi_setup(dev, regs);
else
......
......@@ -26,7 +26,7 @@ static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
static void riscv_intc_irq(struct pt_regs *regs)
{
unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
......@@ -34,7 +34,7 @@ static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause);
}
static asmlinkage void riscv_intc_aia_irq(struct pt_regs *regs)
static void riscv_intc_aia_irq(struct pt_regs *regs)
{
unsigned long topi;
......
......@@ -163,5 +163,6 @@ static struct platform_driver ts4800_ic_driver = {
module_platform_driver(ts4800_ic_driver);
MODULE_AUTHOR("Damien Riegel <damien.riegel@savoirfairelinux.com>");
MODULE_DESCRIPTION("Multiplexed-IRQs driver for TS-4800's FPGA");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ts4800_irqc");
......@@ -274,6 +274,9 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
return phys_id == PHYS_CPUID_INVALID;
}
int __init acpi_get_madt_revision(void);
/* Validate the processor object's proc_id */
bool acpi_duplicate_processor_id(int proc_id);
/* Processor _CTS control */
......
......@@ -169,7 +169,7 @@ static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev)
{
return request_threaded_irq(irq, handler, NULL, flags, name, dev);
return request_threaded_irq(irq, handler, NULL, flags | IRQF_COND_ONESHOT, name, dev);
}
extern int __must_check
......
......@@ -1106,6 +1106,7 @@ enum irq_gc_flags {
* @irq_flags_to_set: IRQ* flags to set on irq setup
* @irq_flags_to_clear: IRQ* flags to clear on irq setup
* @gc_flags: Generic chip specific setup flags
* @exit: Function called on each chip when it is destroyed.
* @gc: Array of pointers to generic interrupt chips
*/
struct irq_domain_chip_generic {
......@@ -1114,9 +1115,37 @@ struct irq_domain_chip_generic {
unsigned int irq_flags_to_clear;
unsigned int irq_flags_to_set;
enum irq_gc_flags gc_flags;
void (*exit)(struct irq_chip_generic *gc);
struct irq_chip_generic *gc[];
};
/**
* struct irq_domain_chip_generic_info - Generic chip information structure
* @name: Name of the generic interrupt chip
* @handler: Interrupt handler used by the generic interrupt chip
* @irqs_per_chip: Number of interrupts each chip handles (max 32)
* @num_ct: Number of irq_chip_type instances associated with each
* chip
* @irq_flags_to_clear: IRQ_* bits to clear in the mapping function
* @irq_flags_to_set: IRQ_* bits to set in the mapping function
* @gc_flags: Generic chip specific setup flags
* @init: Function called on each chip when it is created.
* Allows additional chip initialisation to be done.
* @exit: Function called on each chip when it is destroyed.
* Allows chip cleanup operations to be done.
*/
struct irq_domain_chip_generic_info {
const char *name;
irq_flow_handler_t handler;
unsigned int irqs_per_chip;
unsigned int num_ct;
unsigned int irq_flags_to_clear;
unsigned int irq_flags_to_set;
enum irq_gc_flags gc_flags;
int (*init)(struct irq_chip_generic *gc);
void (*exit)(struct irq_chip_generic *gc);
};
/* Generic chip callback functions */
void irq_gc_noop(struct irq_data *d);
void irq_gc_mask_disable_reg(struct irq_data *d);
......@@ -1153,6 +1182,20 @@ int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
#ifdef CONFIG_GENERIC_IRQ_CHIP
int irq_domain_alloc_generic_chips(struct irq_domain *d,
const struct irq_domain_chip_generic_info *info);
void irq_domain_remove_generic_chips(struct irq_domain *d);
#else
static inline int
irq_domain_alloc_generic_chips(struct irq_domain *d,
const struct irq_domain_chip_generic_info *info)
{
return -EINVAL;
}
static inline void irq_domain_remove_generic_chips(struct irq_domain *d) { }
#endif /* CONFIG_GENERIC_IRQ_CHIP */
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
int num_ct, const char *name,
irq_flow_handler_t handler,
......
......@@ -25,6 +25,14 @@ struct its_vm {
irq_hw_number_t db_lpi_base;
unsigned long *db_bitmap;
int nr_db_lpis;
/*
* Ensures mutual exclusion between updates to vlpi_count[]
* and map/unmap when using the ITSList mechanism.
*
* The lock order for any sequence involving the ITSList is
* vmapp_lock -> vpe_lock -> vmovp_lock.
*/
raw_spinlock_t vmapp_lock;
u32 vlpi_count[GICv4_ITS_LIST_MAX];
};
......
......@@ -74,11 +74,24 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
* struct irq_domain_ops - Methods for irq_domain objects
* @match: Match an interrupt controller device node to a host, returns
* 1 on a match
* @select: Match an interrupt controller fw specification. It is more generic
* than @match as it receives a complete struct irq_fwspec. Therefore,
* @select is preferred if provided. Returns 1 on a match.
* @map: Create or update a mapping between a virtual irq number and a hw
* irq number. This is called only once for a given mapping.
* @unmap: Dispose of such a mapping
* @xlate: Given a device tree node and interrupt specifier, decode
* the hardware irq number and linux irq type value.
* @alloc: Allocate @nr_irqs interrupts starting from @virq.
* @free: Free @nr_irqs interrupts starting from @virq.
* @activate: Activate one interrupt in HW (@irqd). If @reserve is set, only
* reserve the vector. If unset, assign the vector (called from
* request_irq()).
* @deactivate: Disarm one interrupt (@irqd).
* @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and
* linux irq type value (@out_type). This is a generalised @xlate
* (over struct irq_fwspec) and is preferred if provided.
* @debug_show: For domains to show specific data for an interrupt in debugfs.
*
* Functions below are provided by the driver and called whenever a new mapping
* is created or an old mapping is disposed. The driver can then proceed to
......@@ -131,6 +144,9 @@ struct irq_domain_chip_generic;
* Optional elements:
* @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
* to swap it for the of_node via the irq_domain_get_of_node accessor
* @bus_token: @fwnode's device_node might be used for several irq domains. But
* in connection with @bus_token, the pair shall be unique in a
* system.
* @gc: Pointer to a list of generic chips. There is a helper function for
* setting up one or more generic chips for interrupt controllers
* drivers using the generic chip library which uses this pointer.
......@@ -141,9 +157,12 @@ struct irq_domain_chip_generic;
* purposes related to the irq domain.
* @parent: Pointer to parent irq_domain to support hierarchy irq_domains
* @msi_parent_ops: Pointer to MSI parent domain methods for per device domain init
* @exit: Function called when the domain is destroyed
*
* Revmap data, used internally by the irq domain code:
* @revmap_size: Size of the linear map table @revmap[]
* @hwirq_max: Top limit for the HW irq number. Especially to avoid
* conflicts/failures with reserved HW irqs. Can be ~0.
* @revmap_size: Size of the linear map table @revmap
* @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
* @revmap: Linear table of irq_data pointers
*/
......@@ -169,6 +188,7 @@ struct irq_domain {
#ifdef CONFIG_GENERIC_MSI_IRQ
const struct msi_parent_ops *msi_parent_ops;
#endif
void (*exit)(struct irq_domain *d);
/* reverse map data. The linear map gets appended to the irq_domain */
irq_hw_number_t hwirq_max;
......@@ -182,7 +202,7 @@ enum {
/* Irq domain is hierarchical */
IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
/* Irq domain name was allocated in __irq_domain_add() */
/* Irq domain name was allocated internally */
IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
/* Irq domain is an IPI domain with virq per cpu */
......@@ -208,6 +228,9 @@ enum {
/* Irq domain is a MSI device domain */
IRQ_DOMAIN_FLAG_MSI_DEVICE = (1 << 9),
/* Irq domain must destroy generic chips when removed */
IRQ_DOMAIN_FLAG_DESTROY_GC = (1 << 10),
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
......@@ -257,10 +280,51 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data);
struct irq_domain_chip_generic_info;
/**
* struct irq_domain_info - Domain information structure
* @fwnode: firmware node for the interrupt controller
* @domain_flags: Additional flags to add to the domain flags
* @size: Size of linear map; 0 for radix mapping only
* @hwirq_max: Maximum number of interrupts supported by controller
* @direct_max: Maximum value of direct maps;
* Use ~0 for no limit; 0 for no direct mapping
* @bus_token: Domain bus token
* @ops: Domain operation callbacks
* @host_data: Controller private data pointer
* @dgc_info: Generic chip information structure pointer used to
* create generic chips for the domain if not NULL.
* @init: Function called when the domain is created.
* Allows additional domain initialisation to be done.
* @exit: Function called when the domain is destroyed.
* Allows additional cleanup operations to be done.
*/
struct irq_domain_info {
struct fwnode_handle *fwnode;
unsigned int domain_flags;
unsigned int size;
irq_hw_number_t hwirq_max;
int direct_max;
enum irq_domain_bus_token bus_token;
const struct irq_domain_ops *ops;
void *host_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
* @parent: Pointer to the parent irq domain used in a hierarchy domain
*/
struct irq_domain *parent;
#endif
struct irq_domain_chip_generic_info *dgc_info;
int (*init)(struct irq_domain *d);
void (*exit)(struct irq_domain *d);
};
struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info);
struct irq_domain *devm_irq_domain_instantiate(struct device *dev,
const struct irq_domain_info *info);
struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
unsigned int size,
unsigned int first_irq,
......@@ -293,7 +357,7 @@ static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
extern const struct fwnode_operations irqchip_fwnode_ops;
static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode)
static inline bool is_fwnode_irqchip(const struct fwnode_handle *fwnode)
{
return fwnode && fwnode->ops == &irqchip_fwnode_ops;
}
......@@ -350,7 +414,17 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no
const struct irq_domain_ops *ops,
void *host_data)
{
return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
struct irq_domain_info info = {
.fwnode = of_node_to_fwnode(of_node),
.size = size,
.hwirq_max = size,
.ops = ops,
.host_data = host_data,
};
struct irq_domain *d;
d = irq_domain_instantiate(&info);
return IS_ERR(d) ? NULL : d;
}
#ifdef CONFIG_IRQ_DOMAIN_NOMAP
......@@ -359,7 +433,17 @@ static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_nod
const struct irq_domain_ops *ops,
void *host_data)
{
return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data);
struct irq_domain_info info = {
.fwnode = of_node_to_fwnode(of_node),
.hwirq_max = max_irq,
.direct_max = max_irq,
.ops = ops,
.host_data = host_data,
};
struct irq_domain *d;
d = irq_domain_instantiate(&info);
return IS_ERR(d) ? NULL : d;
}
extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
......@@ -369,7 +453,16 @@ static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node
const struct irq_domain_ops *ops,
void *host_data)
{
return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data);
struct irq_domain_info info = {
.fwnode = of_node_to_fwnode(of_node),
.hwirq_max = ~0U,
.ops = ops,
.host_data = host_data,
};
struct irq_domain *d;
d = irq_domain_instantiate(&info);
return IS_ERR(d) ? NULL : d;
}
static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode,
......@@ -377,14 +470,33 @@ static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *
const struct irq_domain_ops *ops,
void *host_data)
{
return __irq_domain_add(fwnode, size, size, 0, ops, host_data);
struct irq_domain_info info = {
.fwnode = fwnode,
.size = size,
.hwirq_max = size,
.ops = ops,
.host_data = host_data,
};
struct irq_domain *d;
d = irq_domain_instantiate(&info);
return IS_ERR(d) ? NULL : d;
}
static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode,
const struct irq_domain_ops *ops,
void *host_data)
{
return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data);
struct irq_domain_info info = {
.fwnode = fwnode,
.hwirq_max = ~0,
.ops = ops,
.host_data = host_data,
};
struct irq_domain *d;
d = irq_domain_instantiate(&info);
return IS_ERR(d) ? NULL : d;
}
extern void irq_domain_remove(struct irq_domain *host);
......
......@@ -9,14 +9,8 @@
static struct dentry *irq_dir;
struct irq_bit_descr {
unsigned int mask;
char *name;
};
#define BIT_MASK_DESCR(m) { .mask = m, .name = #m }
static void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
const struct irq_bit_descr *sd, int size)
void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
const struct irq_bit_descr *sd, int size)
{
int i;
......
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/irq.h>
......@@ -282,3 +283,43 @@ int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
}
EXPORT_SYMBOL_GPL(devm_irq_setup_generic_chip);
#endif /* CONFIG_GENERIC_IRQ_CHIP */
#ifdef CONFIG_IRQ_DOMAIN
static void devm_irq_domain_remove(struct device *dev, void *res)
{
struct irq_domain **domain = res;
irq_domain_remove(*domain);
}
/**
* devm_irq_domain_instantiate() - Instantiate a new irq domain for a managed device
* @dev: Device to instantiate the domain for
* @info: Domain information describing the domain to be instantiated
*
* Return: A pointer to the instantiated irq domain or an ERR_PTR value.
*/
struct irq_domain *devm_irq_domain_instantiate(struct device *dev,
const struct irq_domain_info *info)
{
struct irq_domain *domain;
struct irq_domain **dr;
dr = devres_alloc(devm_irq_domain_remove, sizeof(*dr), GFP_KERNEL);
if (!dr)
return ERR_PTR(-ENOMEM);
domain = irq_domain_instantiate(info);
if (!IS_ERR(domain)) {
*dr = domain;
devres_add(dev, dr);
} else {
devres_free(dr);
}
return domain;
}
EXPORT_SYMBOL_GPL(devm_irq_domain_instantiate);
#endif /* CONFIG_IRQ_DOMAIN */
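A minimal sketch of the managed variant in a driver probe path; bar_domain_ops, bar_probe and the domain size are illustrative assumptions, not taken from this series:

#include <linux/err.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static const struct irq_domain_ops bar_domain_ops; /* hypothetical ops table */

static int bar_probe(struct platform_device *pdev)
{
	struct irq_domain_info info = {
		.fwnode		= dev_fwnode(&pdev->dev),
		.size		= 16,
		.hwirq_max	= 16,
		.ops		= &bar_domain_ops,
	};
	struct irq_domain *d;

	d = devm_irq_domain_instantiate(&pdev->dev, &info);
	if (IS_ERR(d))
		return PTR_ERR(d);

	/* No explicit irq_domain_remove(): the devres action tears the domain down on unbind */
	return 0;
}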
......@@ -276,21 +276,14 @@ irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
}
/**
* __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
* @d: irq domain for which to allocate chips
* @irqs_per_chip: Number of interrupts each chip handles (max 32)
* @num_ct: Number of irq_chip_type instances associated with each chip
* @name: Name of the irq chip
* @handler: Default flow handler associated with these chips
* @clr: IRQ_* bits to clear in the mapping function
* @set: IRQ_* bits to set in the mapping function
* @gcflags: Generic chip specific setup flags
* irq_domain_alloc_generic_chips - Allocate generic chips for an irq domain
* @d: irq domain for which to allocate chips
* @info: Generic chip information
*
* Return: 0 on success, negative error code on failure
*/
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
int num_ct, const char *name,
irq_flow_handler_t handler,
unsigned int clr, unsigned int set,
enum irq_gc_flags gcflags)
int irq_domain_alloc_generic_chips(struct irq_domain *d,
const struct irq_domain_chip_generic_info *info)
{
struct irq_domain_chip_generic *dgc;
struct irq_chip_generic *gc;
......@@ -300,27 +293,29 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
size_t gc_sz;
size_t sz;
void *tmp;
int ret;
if (d->gc)
return -EBUSY;
numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
numchips = DIV_ROUND_UP(d->revmap_size, info->irqs_per_chip);
if (!numchips)
return -EINVAL;
/* Allocate a pointer, generic chip and chiptypes for each chip */
gc_sz = struct_size(gc, chip_types, num_ct);
gc_sz = struct_size(gc, chip_types, info->num_ct);
dgc_sz = struct_size(dgc, gc, numchips);
sz = dgc_sz + numchips * gc_sz;
tmp = dgc = kzalloc(sz, GFP_KERNEL);
if (!dgc)
return -ENOMEM;
dgc->irqs_per_chip = irqs_per_chip;
dgc->irqs_per_chip = info->irqs_per_chip;
dgc->num_chips = numchips;
dgc->irq_flags_to_set = set;
dgc->irq_flags_to_clear = clr;
dgc->gc_flags = gcflags;
dgc->irq_flags_to_set = info->irq_flags_to_set;
dgc->irq_flags_to_clear = info->irq_flags_to_clear;
dgc->gc_flags = info->gc_flags;
dgc->exit = info->exit;
d->gc = dgc;
/* Calc pointer to the first generic chip */
......@@ -328,15 +323,22 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
for (i = 0; i < numchips; i++) {
/* Store the pointer to the generic chip */
dgc->gc[i] = gc = tmp;
irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
NULL, handler);
irq_init_generic_chip(gc, info->name, info->num_ct,
i * dgc->irqs_per_chip, NULL,
info->handler);
gc->domain = d;
if (gcflags & IRQ_GC_BE_IO) {
if (dgc->gc_flags & IRQ_GC_BE_IO) {
gc->reg_readl = &irq_readl_be;
gc->reg_writel = &irq_writel_be;
}
if (info->init) {
ret = info->init(gc);
if (ret)
goto err;
}
raw_spin_lock_irqsave(&gc_lock, flags);
list_add_tail(&gc->list, &gc_list);
raw_spin_unlock_irqrestore(&gc_lock, flags);
......@@ -344,6 +346,69 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
tmp += gc_sz;
}
return 0;
err:
while (i--) {
if (dgc->exit)
dgc->exit(dgc->gc[i]);
irq_remove_generic_chip(dgc->gc[i], ~0U, 0, 0);
}
d->gc = NULL;
kfree(dgc);
return ret;
}
EXPORT_SYMBOL_GPL(irq_domain_alloc_generic_chips);
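A minimal sketch of how a caller might fill struct irq_domain_chip_generic_info for the new structured allocator; the init callback, chip name and flag choices are illustrative, only the field names are taken from the code above:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int baz_gc_init(struct irq_chip_generic *gc)
{
	/* Per-chip hardware setup (e.g. initial mask programming) would go here */
	return 0;
}

static int baz_setup_chips(struct irq_domain *d)
{
	struct irq_domain_chip_generic_info info = {
		.name			= "baz",
		.handler		= handle_level_irq,
		.irqs_per_chip		= 32,
		.num_ct			= 1,
		.irq_flags_to_clear	= IRQ_NOREQUEST,
		.init			= baz_gc_init,
	};

	return irq_domain_alloc_generic_chips(d, &info);
}

If .init fails for one chip, the error path above unwinds the already initialized chips through the .exit callback; irq_domain_remove_generic_chips() performs the same exit-then-remove sequence on teardown.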
/**
* irq_domain_remove_generic_chips - Remove generic chips from an irq domain
* @d: irq domain for which generic chips are to be removed
*/
void irq_domain_remove_generic_chips(struct irq_domain *d)
{
struct irq_domain_chip_generic *dgc = d->gc;
unsigned int i;
if (!dgc)
return;
for (i = 0; i < dgc->num_chips; i++) {
if (dgc->exit)
dgc->exit(dgc->gc[i]);
irq_remove_generic_chip(dgc->gc[i], ~0U, 0, 0);
}
d->gc = NULL;
kfree(dgc);
}
EXPORT_SYMBOL_GPL(irq_domain_remove_generic_chips);
/**
* __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
* @d: irq domain for which to allocate chips
* @irqs_per_chip: Number of interrupts each chip handles (max 32)
* @num_ct: Number of irq_chip_type instances associated with each chip
* @name: Name of the irq chip
* @handler: Default flow handler associated with these chips
* @clr: IRQ_* bits to clear in the mapping function
* @set: IRQ_* bits to set in the mapping function
* @gcflags: Generic chip specific setup flags
*/
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
int num_ct, const char *name,
irq_flow_handler_t handler,
unsigned int clr, unsigned int set,
enum irq_gc_flags gcflags)
{
struct irq_domain_chip_generic_info info = {
.irqs_per_chip = irqs_per_chip,
.num_ct = num_ct,
.name = name,
.handler = handler,
.irq_flags_to_clear = clr,
.irq_flags_to_set = set,
.gc_flags = gcflags,
};
return irq_domain_alloc_generic_chips(d, &info);
}
EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);
......
......@@ -501,6 +501,16 @@ static inline struct irq_data *irqd_get_parent_data(struct irq_data *irqd)
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
#include <linux/debugfs.h>
struct irq_bit_descr {
unsigned int mask;
char *name;
};
#define BIT_MASK_DESCR(m) { .mask = m, .name = #m }
void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
const struct irq_bit_descr *sd, int size);
void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
{
......
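For context, a minimal sketch (usable only inside kernel/irq, where internals.h lives) of how a caller could describe state bits with BIT_MASK_DESCR() and print them through the now non-static irq_debug_show_bits(); the table name and the particular IRQS_* bits are illustrative:

#include <linux/seq_file.h>
#include "internals.h"

static const struct irq_bit_descr foo_istate_bits[] = {
	BIT_MASK_DESCR(IRQS_AUTODETECT),
	BIT_MASK_DESCR(IRQS_SPURIOUS_DISABLED),
	BIT_MASK_DESCR(IRQS_PENDING),
};

static void foo_show_istate(struct seq_file *m, struct irq_desc *desc)
{
	irq_debug_show_bits(m, 1, desc->istate, foo_istate_bits,
			    ARRAY_SIZE(foo_istate_bits));
}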
......@@ -461,10 +461,10 @@ int show_interrupts(struct seq_file *p, void *v)
{
static int prec;
unsigned long flags, any_count = 0;
int i = *(loff_t *) v, j;
struct irqaction *action;
struct irq_desc *desc;
unsigned long flags;
if (i > ACTUAL_NR_IRQS)
return 0;
......@@ -488,10 +488,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (!desc || irq_settings_is_hidden(desc))
goto outsparse;
if (desc->kstat_irqs)
any_count = kstat_irqs_desc(desc, cpu_online_mask);
if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
if (!desc->action || irq_desc_is_chained(desc) || !desc->kstat_irqs)
goto outsparse;
seq_printf(p, "%*d: ", prec, i);
......