Commit 6de9eb21 authored by Huacai Chen's avatar Huacai Chen

Merge 'irq/loongarch', 'pci/ctrl/loongson' and 'pci/header-cleanup-immutable'

LoongArch architecture changes for 5.20 depend on the irqchip and pci
changes to work, so merge them to create a base.
...@@ -365,13 +365,4 @@ extern void free_dma(unsigned int dmanr); /* release it again */ ...@@ -365,13 +365,4 @@ extern void free_dma(unsigned int dmanr); /* release it again */
#define KERNEL_HAVE_CHECK_DMA #define KERNEL_HAVE_CHECK_DMA
extern int check_dma(unsigned int dmanr); extern int check_dma(unsigned int dmanr);
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_DMA_H */ #endif /* _ASM_DMA_H */
...@@ -56,12 +56,6 @@ struct pci_controller { ...@@ -56,12 +56,6 @@ struct pci_controller {
/* IOMMU controls. */ /* IOMMU controls. */
/* TODO: integrate with include/asm-generic/pci.h ? */
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? 15 : 14;
}
#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
static inline int pci_proc_domain(struct pci_bus *bus) static inline int pci_proc_domain(struct pci_bus *bus)
......
...@@ -7,10 +7,5 @@ ...@@ -7,10 +7,5 @@
#define ASM_ARC_DMA_H #define ASM_ARC_DMA_H
#define MAX_DMA_ADDRESS 0xC0000000 #define MAX_DMA_ADDRESS 0xC0000000
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy 0
#endif
#endif #endif
...@@ -143,10 +143,4 @@ extern int get_dma_residue(unsigned int chan); ...@@ -143,10 +143,4 @@ extern int get_dma_residue(unsigned int chan);
#endif /* CONFIG_ISA_DMA_API */ #endif /* CONFIG_ISA_DMA_API */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* __ASM_ARM_DMA_H */ #endif /* __ASM_ARM_DMA_H */
...@@ -22,11 +22,6 @@ static inline int pci_proc_domain(struct pci_bus *bus) ...@@ -22,11 +22,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)
#define HAVE_PCI_MMAP #define HAVE_PCI_MMAP
#define ARCH_GENERIC_PCI_MMAP_RESOURCE #define ARCH_GENERIC_PCI_MMAP_RESOURCE
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? 15 : 14;
}
extern void pcibios_report_status(unsigned int status_mask, int warn); extern void pcibios_report_status(unsigned int status_mask, int warn);
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
#include <asm/io.h> #include <asm/io.h>
#define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0
/* /*
* Set to 1 if the kernel should re-assign all PCI bus numbers * Set to 1 if the kernel should re-assign all PCI bus numbers
...@@ -18,21 +17,8 @@ ...@@ -18,21 +17,8 @@
(pci_has_flag(PCI_REASSIGN_ALL_BUS)) (pci_has_flag(PCI_REASSIGN_ALL_BUS))
#define arch_can_pci_mmap_wc() 1 #define arch_can_pci_mmap_wc() 1
#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
extern int isa_dma_bridge_buggy; /* Generic PCI */
#include <asm-generic/pci.h>
#ifdef CONFIG_PCI
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
/* no legacy IRQ on arm64 */
return -ENODEV;
}
static inline int pci_proc_domain(struct pci_bus *bus)
{
return 1;
}
#endif /* CONFIG_PCI */
#endif /* __ASM_PCI_H */ #endif /* __ASM_PCI_H */
...@@ -9,26 +9,7 @@ ...@@ -9,26 +9,7 @@
#include <asm/io.h> #include <asm/io.h>
#define PCIBIOS_MIN_IO 0 /* Generic PCI */
#define PCIBIOS_MIN_MEM 0 #include <asm-generic/pci.h>
/* C-SKY shim does not initialize PCI bus */
#define pcibios_assign_all_busses() 1
extern int isa_dma_bridge_buggy;
#ifdef CONFIG_PCI
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
/* no legacy IRQ on csky */
return -ENODEV;
}
static inline int pci_proc_domain(struct pci_bus *bus)
{
/* always show the domain in /proc */
return 1;
}
#endif /* CONFIG_PCI */
#endif /* __ASM_CSKY_PCI_H */ #endif /* __ASM_CSKY_PCI_H */
...@@ -12,8 +12,6 @@ ...@@ -12,8 +12,6 @@
extern unsigned long MAX_DMA_ADDRESS; extern unsigned long MAX_DMA_ADDRESS;
extern int isa_dma_bridge_buggy;
#define free_dma(x) #define free_dma(x)
#endif /* _ASM_IA64_DMA_H */ #endif /* _ASM_IA64_DMA_H */
...@@ -63,10 +63,4 @@ static inline int pci_proc_domain(struct pci_bus *bus) ...@@ -63,10 +63,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
return (pci_domain_nr(bus) != 0); return (pci_domain_nr(bus) != 0);
} }
#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
}
#endif /* _ASM_IA64_PCI_H */ #endif /* _ASM_IA64_PCI_H */
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
config LOONGARCH config LOONGARCH
bool bool
default y default y
select ACPI_GENERIC_GSI if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_BINFMT_ELF_STATE select ARCH_BINFMT_ELF_STATE
select ARCH_ENABLE_MEMORY_HOTPLUG select ARCH_ENABLE_MEMORY_HOTPLUG
......
...@@ -31,6 +31,148 @@ static inline bool acpi_has_cpu_in_madt(void) ...@@ -31,6 +31,148 @@ static inline bool acpi_has_cpu_in_madt(void)
extern struct list_head acpi_wakeup_device_list; extern struct list_head acpi_wakeup_device_list;
/*
* Temporary definitions until the core ACPICA code gets updated (see
* 1656837932-18257-1-git-send-email-lvjianmin@loongson.cn and its
* follow-ups for the "rationale").
*
* Once the "legal reasons" are cleared and the code is merged,
* this can be dropped entirely.
*/
#if (ACPI_CA_VERSION == 0x20220331 && !defined(LOONGARCH_ACPICA_EXT))
#define LOONGARCH_ACPICA_EXT 1
#define ACPI_MADT_TYPE_CORE_PIC 17
#define ACPI_MADT_TYPE_LIO_PIC 18
#define ACPI_MADT_TYPE_HT_PIC 19
#define ACPI_MADT_TYPE_EIO_PIC 20
#define ACPI_MADT_TYPE_MSI_PIC 21
#define ACPI_MADT_TYPE_BIO_PIC 22
#define ACPI_MADT_TYPE_LPC_PIC 23
/* Values for Version field above */
enum acpi_madt_core_pic_version {
ACPI_MADT_CORE_PIC_VERSION_NONE = 0,
ACPI_MADT_CORE_PIC_VERSION_V1 = 1,
ACPI_MADT_CORE_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
};
enum acpi_madt_lio_pic_version {
ACPI_MADT_LIO_PIC_VERSION_NONE = 0,
ACPI_MADT_LIO_PIC_VERSION_V1 = 1,
ACPI_MADT_LIO_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
};
enum acpi_madt_eio_pic_version {
ACPI_MADT_EIO_PIC_VERSION_NONE = 0,
ACPI_MADT_EIO_PIC_VERSION_V1 = 1,
ACPI_MADT_EIO_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
};
enum acpi_madt_ht_pic_version {
ACPI_MADT_HT_PIC_VERSION_NONE = 0,
ACPI_MADT_HT_PIC_VERSION_V1 = 1,
ACPI_MADT_HT_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
};
enum acpi_madt_bio_pic_version {
ACPI_MADT_BIO_PIC_VERSION_NONE = 0,
ACPI_MADT_BIO_PIC_VERSION_V1 = 1,
ACPI_MADT_BIO_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
};
enum acpi_madt_msi_pic_version {
ACPI_MADT_MSI_PIC_VERSION_NONE = 0,
ACPI_MADT_MSI_PIC_VERSION_V1 = 1,
ACPI_MADT_MSI_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
};
enum acpi_madt_lpc_pic_version {
ACPI_MADT_LPC_PIC_VERSION_NONE = 0,
ACPI_MADT_LPC_PIC_VERSION_V1 = 1,
ACPI_MADT_LPC_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
};
#pragma pack(1)
/* Core Interrupt Controller */
struct acpi_madt_core_pic {
struct acpi_subtable_header header;
u8 version;
u32 processor_id;
u32 core_id;
u32 flags;
};
/* Legacy I/O Interrupt Controller */
struct acpi_madt_lio_pic {
struct acpi_subtable_header header;
u8 version;
u64 address;
u16 size;
u8 cascade[2];
u32 cascade_map[2];
};
/* Extend I/O Interrupt Controller */
struct acpi_madt_eio_pic {
struct acpi_subtable_header header;
u8 version;
u8 cascade;
u8 node;
u64 node_map;
};
/* HT Interrupt Controller */
struct acpi_madt_ht_pic {
struct acpi_subtable_header header;
u8 version;
u64 address;
u16 size;
u8 cascade[8];
};
/* Bridge I/O Interrupt Controller */
struct acpi_madt_bio_pic {
struct acpi_subtable_header header;
u8 version;
u64 address;
u16 size;
u16 id;
u16 gsi_base;
};
/* MSI Interrupt Controller */
struct acpi_madt_msi_pic {
struct acpi_subtable_header header;
u8 version;
u64 msg_address;
u32 start;
u32 count;
};
/* LPC Interrupt Controller */
struct acpi_madt_lpc_pic {
struct acpi_subtable_header header;
u8 version;
u64 address;
u16 size;
u8 cascade;
};
#pragma pack()
#endif
#endif /* !CONFIG_ACPI */ #endif /* !CONFIG_ACPI */
#define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT #define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT
......
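The MADT subtable definitions above mirror what will eventually land in ACPICA; until then they let LoongArch walk its interrupt-controller entries with the standard helpers. A minimal sketch of such a walk, assuming the usual acpi_table_parse_madt() callback convention (the function names here are hypothetical; the real consumers are the irqchip drivers further down in this merge):

#include <linux/acpi.h>
#include <linux/printk.h>

/* Hypothetical example: log every CORE_PIC entry found in the MADT. */
static int __init report_core_pic(union acpi_subtable_headers *header,
				  const unsigned long end)
{
	struct acpi_madt_core_pic *pic = (struct acpi_madt_core_pic *)header;

	if (pic->version != ACPI_MADT_CORE_PIC_VERSION_V1)
		return -EINVAL;

	pr_info("CORE_PIC: processor %u, core %u, flags %#x\n",
		pic->processor_id, pic->core_id, pic->flags);
	return 0;
}

static int __init probe_core_pics(void)
{
	/* Returns the number of entries handled, or a negative error. */
	return acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC, report_core_pic, 0);
}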
...@@ -35,9 +35,6 @@ static inline bool on_irq_stack(int cpu, unsigned long sp) ...@@ -35,9 +35,6 @@ static inline bool on_irq_stack(int cpu, unsigned long sp)
return (low <= sp && sp <= high); return (low <= sp && sp <= high);
} }
int get_ipi_irq(void);
int get_pmc_irq(void);
int get_timer_irq(void);
void spurious_interrupt(void); void spurious_interrupt(void);
#define NR_IRQS_LEGACY 16 #define NR_IRQS_LEGACY 16
...@@ -48,6 +45,14 @@ void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_sel ...@@ -48,6 +45,14 @@ void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_sel
#define MAX_IO_PICS 2 #define MAX_IO_PICS 2
#define NR_IRQS (64 + (256 * MAX_IO_PICS)) #define NR_IRQS (64 + (256 * MAX_IO_PICS))
struct acpi_vector_group {
int node;
int pci_segment;
struct irq_domain *parent;
};
extern struct acpi_vector_group pch_group[MAX_IO_PICS];
extern struct acpi_vector_group msi_group[MAX_IO_PICS];
#define CORES_PER_EIO_NODE 4 #define CORES_PER_EIO_NODE 4
#define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */ #define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */
...@@ -79,15 +84,6 @@ void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_sel ...@@ -79,15 +84,6 @@ void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_sel
extern int find_pch_pic(u32 gsi); extern int find_pch_pic(u32 gsi);
extern int eiointc_get_node(int id); extern int eiointc_get_node(int id);
static inline void eiointc_enable(void)
{
uint64_t misc;
misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
}
struct acpi_madt_lio_pic; struct acpi_madt_lio_pic;
struct acpi_madt_eio_pic; struct acpi_madt_eio_pic;
struct acpi_madt_ht_pic; struct acpi_madt_ht_pic;
...@@ -95,21 +91,29 @@ struct acpi_madt_bio_pic; ...@@ -95,21 +91,29 @@ struct acpi_madt_bio_pic;
struct acpi_madt_msi_pic; struct acpi_madt_msi_pic;
struct acpi_madt_lpc_pic; struct acpi_madt_lpc_pic;
struct irq_domain *loongarch_cpu_irq_init(void); int liointc_acpi_init(struct irq_domain *parent,
struct irq_domain *liointc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lio_pic *acpi_liointc); struct acpi_madt_lio_pic *acpi_liointc);
struct irq_domain *eiointc_acpi_init(struct irq_domain *parent, int eiointc_acpi_init(struct irq_domain *parent,
struct acpi_madt_eio_pic *acpi_eiointc); struct acpi_madt_eio_pic *acpi_eiointc);
struct irq_domain *htvec_acpi_init(struct irq_domain *parent, struct irq_domain *htvec_acpi_init(struct irq_domain *parent,
struct acpi_madt_ht_pic *acpi_htvec); struct acpi_madt_ht_pic *acpi_htvec);
struct irq_domain *pch_lpc_acpi_init(struct irq_domain *parent, int pch_lpc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lpc_pic *acpi_pchlpc); struct acpi_madt_lpc_pic *acpi_pchlpc);
struct irq_domain *pch_msi_acpi_init(struct irq_domain *parent, #if IS_ENABLED(CONFIG_LOONGSON_PCH_MSI)
int pch_msi_acpi_init(struct irq_domain *parent,
struct acpi_madt_msi_pic *acpi_pchmsi); struct acpi_madt_msi_pic *acpi_pchmsi);
struct irq_domain *pch_pic_acpi_init(struct irq_domain *parent, #else
static inline int pch_msi_acpi_init(struct irq_domain *parent,
struct acpi_madt_msi_pic *acpi_pchmsi)
{
return 0;
}
#endif
int pch_pic_acpi_init(struct irq_domain *parent,
struct acpi_madt_bio_pic *acpi_pchpic); struct acpi_madt_bio_pic *acpi_pchpic);
int find_pch_pic(u32 gsi);
struct fwnode_handle *get_pch_msi_handle(int pci_segment);
extern struct acpi_madt_lio_pic *acpi_liointc; extern struct acpi_madt_lio_pic *acpi_liointc;
extern struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS]; extern struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS];
...@@ -119,11 +123,10 @@ extern struct acpi_madt_lpc_pic *acpi_pchlpc; ...@@ -119,11 +123,10 @@ extern struct acpi_madt_lpc_pic *acpi_pchlpc;
extern struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS]; extern struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS];
extern struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS]; extern struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS];
extern struct irq_domain *cpu_domain; extern struct fwnode_handle *cpuintc_handle;
extern struct irq_domain *liointc_domain; extern struct fwnode_handle *liointc_handle;
extern struct irq_domain *pch_lpc_domain; extern struct fwnode_handle *pch_lpc_handle;
extern struct irq_domain *pch_msi_domain[MAX_IO_PICS]; extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
extern struct irq_domain *pch_pic_domain[MAX_IO_PICS];
extern irqreturn_t loongson3_ipi_interrupt(int irq, void *dev); extern irqreturn_t loongson3_ipi_interrupt(int irq, void *dev);
......
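Because the exported irq_domain pointers are replaced here by fwnode handles, consumers now resolve the domain at the point of use instead of dereferencing a global. A minimal sketch of that lookup pattern, assuming a published handle such as cpuintc_handle (it is the same pattern get_ipi_irq() and get_timer_irq() use later in this merge):

#include <linux/errno.h>
#include <linux/irqdomain.h>

static int map_hwirq_on(struct fwnode_handle *handle, irq_hw_number_t hwirq)
{
	struct irq_domain *d = irq_find_matching_fwnode(handle, DOMAIN_BUS_ANY);

	if (!d)
		return -ENODEV;

	return irq_create_mapping(d, hwirq);	/* virq number, or 0 on failure */
}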
...@@ -25,7 +25,6 @@ EXPORT_SYMBOL(acpi_pci_disabled); ...@@ -25,7 +25,6 @@ EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */ int acpi_strict = 1; /* We have no workarounds on LoongArch */
int num_processors; int num_processors;
int disabled_cpus; int disabled_cpus;
enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
u64 acpi_saved_sp; u64 acpi_saved_sp;
...@@ -33,70 +32,6 @@ u64 acpi_saved_sp; ...@@ -33,70 +32,6 @@ u64 acpi_saved_sp;
#define PREFIX "ACPI: " #define PREFIX "ACPI: "
int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
{
if (irqp != NULL)
*irqp = acpi_register_gsi(NULL, gsi, -1, -1);
return (*irqp >= 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
int acpi_isa_irq_to_gsi(unsigned int isa_irq, u32 *gsi)
{
if (gsi)
*gsi = isa_irq;
return 0;
}
/*
* success: return IRQ number (>=0)
* failure: return < 0
*/
int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
{
struct irq_fwspec fwspec;
switch (gsi) {
case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ:
fwspec.fwnode = liointc_domain->fwnode;
fwspec.param[0] = gsi - GSI_MIN_CPU_IRQ;
fwspec.param_count = 1;
return irq_create_fwspec_mapping(&fwspec);
case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ:
if (!pch_lpc_domain)
return -EINVAL;
fwspec.fwnode = pch_lpc_domain->fwnode;
fwspec.param[0] = gsi - GSI_MIN_LPC_IRQ;
fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
fwspec.param_count = 2;
return irq_create_fwspec_mapping(&fwspec);
case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ:
if (!pch_pic_domain[0])
return -EINVAL;
fwspec.fwnode = pch_pic_domain[0]->fwnode;
fwspec.param[0] = gsi - GSI_MIN_PCH_IRQ;
fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
fwspec.param_count = 2;
return irq_create_fwspec_mapping(&fwspec);
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_register_gsi);
void acpi_unregister_gsi(u32 gsi)
{
}
EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size) void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
{ {
......
...@@ -25,12 +25,8 @@ DEFINE_PER_CPU(unsigned long, irq_stack); ...@@ -25,12 +25,8 @@ DEFINE_PER_CPU(unsigned long, irq_stack);
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat); EXPORT_PER_CPU_SYMBOL(irq_stat);
struct irq_domain *cpu_domain; struct acpi_vector_group pch_group[MAX_IO_PICS];
struct irq_domain *liointc_domain; struct acpi_vector_group msi_group[MAX_IO_PICS];
struct irq_domain *pch_lpc_domain;
struct irq_domain *pch_msi_domain[MAX_IO_PICS];
struct irq_domain *pch_pic_domain[MAX_IO_PICS];
/* /*
* 'what should we do if we get a hw irq event on an illegal vector'. * 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves. * each architecture has to answer this themselves.
...@@ -56,6 +52,51 @@ int arch_show_interrupts(struct seq_file *p, int prec) ...@@ -56,6 +52,51 @@ int arch_show_interrupts(struct seq_file *p, int prec)
return 0; return 0;
} }
static int __init early_pci_mcfg_parse(struct acpi_table_header *header)
{
struct acpi_table_mcfg *mcfg;
struct acpi_mcfg_allocation *mptr;
int i, n;
if (header->length < sizeof(struct acpi_table_mcfg))
return -EINVAL;
n = (header->length - sizeof(struct acpi_table_mcfg)) /
sizeof(struct acpi_mcfg_allocation);
mcfg = (struct acpi_table_mcfg *)header;
mptr = (struct acpi_mcfg_allocation *) &mcfg[1];
for (i = 0; i < n; i++, mptr++) {
msi_group[i].pci_segment = mptr->pci_segment;
pch_group[i].node = msi_group[i].node = (mptr->address >> 44) & 0xf;
}
return 0;
}
static void __init init_vec_parent_group(void)
{
int i;
for (i = 0; i < MAX_IO_PICS; i++) {
msi_group[i].pci_segment = -1;
msi_group[i].node = -1;
pch_group[i].node = -1;
}
acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse);
}
static int __init get_ipi_irq(void)
{
struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
if (d)
return irq_create_mapping(d, EXCCODE_IPI - EXCCODE_INT_START);
return -EINVAL;
}
void __init init_IRQ(void) void __init init_IRQ(void)
{ {
int i; int i;
...@@ -69,9 +110,12 @@ void __init init_IRQ(void) ...@@ -69,9 +110,12 @@ void __init init_IRQ(void)
clear_csr_ecfg(ECFG0_IM); clear_csr_ecfg(ECFG0_IM);
clear_csr_estat(ESTATF_IP); clear_csr_estat(ESTATF_IP);
init_vec_parent_group();
irqchip_init(); irqchip_init();
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
ipi_irq = EXCCODE_IPI - EXCCODE_INT_START; ipi_irq = get_ipi_irq();
if (ipi_irq < 0)
panic("IPI IRQ mapping failed\n");
irq_set_percpu_devid(ipi_irq); irq_set_percpu_devid(ipi_irq);
r = request_percpu_irq(ipi_irq, loongson3_ipi_interrupt, "IPI", &ipi_dummy_dev); r = request_percpu_irq(ipi_irq, loongson3_ipi_interrupt, "IPI", &ipi_dummy_dev);
if (r < 0) if (r < 0)
......
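For reference, the node derivation in early_pci_mcfg_parse() above takes bits 47:44 of each MCFG entry's ECAM base address; a worked example with a made-up value:

/*
 * Illustrative only (made-up address): an MCFG entry whose ECAM base is
 * 0x300000000000 gives (address >> 44) & 0xf == 3, so the corresponding
 * pch_group/msi_group entries are bound to NUMA node 3.
 */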
...@@ -123,6 +123,16 @@ void sync_counter(void) ...@@ -123,6 +123,16 @@ void sync_counter(void)
csr_write64(-init_timeval, LOONGARCH_CSR_CNTC); csr_write64(-init_timeval, LOONGARCH_CSR_CNTC);
} }
static int get_timer_irq(void)
{
struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
if (d)
return irq_create_mapping(d, EXCCODE_TIMER - EXCCODE_INT_START);
return -EINVAL;
}
int constant_clockevent_init(void) int constant_clockevent_init(void)
{ {
unsigned int irq; unsigned int irq;
...@@ -132,7 +142,9 @@ int constant_clockevent_init(void) ...@@ -132,7 +142,9 @@ int constant_clockevent_init(void)
struct clock_event_device *cd; struct clock_event_device *cd;
static int timer_irq_installed = 0; static int timer_irq_installed = 0;
irq = EXCCODE_TIMER - EXCCODE_INT_START; irq = get_timer_irq();
if (irq < 0)
pr_err("Failed to map irq %d (timer)\n", irq);
cd = &per_cpu(constant_clockevent_device, cpu); cd = &per_cpu(constant_clockevent_device, cpu);
......
...@@ -6,10 +6,4 @@ ...@@ -6,10 +6,4 @@
bootmem allocator (but this should do it for this) */ bootmem allocator (but this should do it for this) */
#define MAX_DMA_ADDRESS PAGE_OFFSET #define MAX_DMA_ADDRESS PAGE_OFFSET
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _M68K_DMA_H */ #endif /* _M68K_DMA_H */
...@@ -2,8 +2,6 @@ ...@@ -2,8 +2,6 @@
#ifndef _ASM_M68K_PCI_H #ifndef _ASM_M68K_PCI_H
#define _ASM_M68K_PCI_H #define _ASM_M68K_PCI_H
#include <asm-generic/pci.h>
#define pcibios_assign_all_busses() 1 #define pcibios_assign_all_busses() 1
#define PCIBIOS_MIN_IO 0x00000100 #define PCIBIOS_MIN_IO 0x00000100
......
...@@ -9,10 +9,4 @@ ...@@ -9,10 +9,4 @@
/* Virtual address corresponding to last available physical memory address. */ /* Virtual address corresponding to last available physical memory address. */
#define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1) #define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1)
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_MICROBLAZE_DMA_H */ #endif /* _ASM_MICROBLAZE_DMA_H */
...@@ -307,12 +307,4 @@ static __inline__ int get_dma_residue(unsigned int dmanr) ...@@ -307,12 +307,4 @@ static __inline__ int get_dma_residue(unsigned int dmanr)
extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr); /* release it again */ extern void free_dma(unsigned int dmanr); /* release it again */
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_DMA_H */ #endif /* _ASM_DMA_H */
...@@ -7,8 +7,9 @@ ...@@ -7,8 +7,9 @@
#define NR_MIPS_CPU_IRQS 8 #define NR_MIPS_CPU_IRQS 8
#define NR_MAX_CHAINED_IRQS 40 /* Chained IRQs means those not directly used by devices */ #define NR_MAX_CHAINED_IRQS 40 /* Chained IRQs means those not directly used by devices */
#define NR_IRQS (NR_IRQS_LEGACY + NR_MIPS_CPU_IRQS + NR_MAX_CHAINED_IRQS + 256) #define NR_IRQS (NR_IRQS_LEGACY + NR_MIPS_CPU_IRQS + NR_MAX_CHAINED_IRQS + 256)
#define MAX_IO_PICS 1
#define MIPS_CPU_IRQ_BASE NR_IRQS_LEGACY #define MIPS_CPU_IRQ_BASE NR_IRQS_LEGACY
#define GSI_MIN_CPU_IRQ 0
#include <asm/mach-generic/irq.h> #include <asm/mach-generic/irq.h>
......
...@@ -139,10 +139,4 @@ static inline int pci_proc_domain(struct pci_bus *bus) ...@@ -139,10 +139,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
/* Do platform specific device initialization at pci_enable_device() time */ /* Do platform specific device initialization at pci_enable_device() time */
extern int pcibios_plat_dev_init(struct pci_dev *dev); extern int pcibios_plat_dev_init(struct pci_dev *dev);
/* Chances are this interrupt is wired PC-style ... */
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? 15 : 14;
}
#endif /* _ASM_PCI_H */ #endif /* _ASM_PCI_H */
...@@ -176,10 +176,4 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) ...@@ -176,10 +176,4 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
#define free_dma(dmanr) #define free_dma(dmanr)
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_DMA_H */ #endif /* _ASM_DMA_H */
...@@ -162,11 +162,6 @@ extern void pcibios_init_bridge(struct pci_dev *); ...@@ -162,11 +162,6 @@ extern void pcibios_init_bridge(struct pci_dev *);
#define PCIBIOS_MIN_IO 0x10 #define PCIBIOS_MIN_IO 0x10
#define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */ #define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? 15 : 14;
}
#define HAVE_PCI_MMAP #define HAVE_PCI_MMAP
#define ARCH_GENERIC_PCI_MMAP_RESOURCE #define ARCH_GENERIC_PCI_MMAP_RESOURCE
......
...@@ -340,11 +340,5 @@ extern int request_dma(unsigned int dmanr, const char *device_id); ...@@ -340,11 +340,5 @@ extern int request_dma(unsigned int dmanr, const char *device_id);
/* release it again */ /* release it again */
extern void free_dma(unsigned int dmanr); extern void free_dma(unsigned int dmanr);
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DMA_H */ #endif /* _ASM_POWERPC_DMA_H */
...@@ -39,7 +39,6 @@ ...@@ -39,7 +39,6 @@
#define pcibios_assign_all_busses() \ #define pcibios_assign_all_busses() \
(pci_has_flag(PCI_REASSIGN_ALL_BUS)) (pci_has_flag(PCI_REASSIGN_ALL_BUS))
#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{ {
if (ppc_md.pci_get_legacy_ide_irq) if (ppc_md.pci_get_legacy_ide_irq)
......
...@@ -12,31 +12,7 @@ ...@@ -12,31 +12,7 @@
#include <asm/io.h> #include <asm/io.h>
#define PCIBIOS_MIN_IO 0 #if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
#define PCIBIOS_MIN_MEM 0
/* RISC-V shim does not initialize PCI bus */
#define pcibios_assign_all_busses() 1
#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
extern int isa_dma_bridge_buggy;
#ifdef CONFIG_PCI
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
/* no legacy IRQ on risc-v */
return -ENODEV;
}
static inline int pci_proc_domain(struct pci_bus *bus)
{
/* always show the domain in /proc */
return 1;
}
#ifdef CONFIG_NUMA
static inline int pcibus_to_node(struct pci_bus *bus) static inline int pcibus_to_node(struct pci_bus *bus)
{ {
return dev_to_node(&bus->dev); return dev_to_node(&bus->dev);
...@@ -46,8 +22,9 @@ static inline int pcibus_to_node(struct pci_bus *bus) ...@@ -46,8 +22,9 @@ static inline int pcibus_to_node(struct pci_bus *bus)
cpu_all_mask : \ cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus))) cpumask_of_node(pcibus_to_node(bus)))
#endif #endif
#endif /* CONFIG_NUMA */ #endif /* defined(CONFIG_PCI) && defined(CONFIG_NUMA) */
#endif /* CONFIG_PCI */ /* Generic PCI */
#include <asm-generic/pci.h>
#endif /* _ASM_RISCV_PCI_H */ #endif /* _ASM_RISCV_PCI_H */
...@@ -11,10 +11,4 @@ ...@@ -11,10 +11,4 @@
*/ */
#define MAX_DMA_ADDRESS 0x80000000 #define MAX_DMA_ADDRESS 0x80000000
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_S390_DMA_H */ #endif /* _ASM_S390_DMA_H */
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/iommu.h> #include <linux/iommu.h>
#include <linux/pci_hotplug.h> #include <linux/pci_hotplug.h>
#include <asm-generic/pci.h>
#include <asm/pci_clp.h> #include <asm/pci_clp.h>
#include <asm/pci_debug.h> #include <asm/pci_debug.h>
#include <asm/sclp.h> #include <asm/sclp.h>
......
...@@ -137,10 +137,4 @@ extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist); ...@@ -137,10 +137,4 @@ extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist);
extern int dma_create_sysfs_files(struct dma_channel *, struct dma_info *); extern int dma_create_sysfs_files(struct dma_channel *, struct dma_info *);
extern void dma_remove_sysfs_files(struct dma_channel *, struct dma_info *); extern void dma_remove_sysfs_files(struct dma_channel *, struct dma_info *);
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* __ASM_SH_DMA_H */ #endif /* __ASM_SH_DMA_H */
...@@ -88,10 +88,4 @@ static inline int pci_proc_domain(struct pci_bus *bus) ...@@ -88,10 +88,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
return hose->need_domain_info; return hose->need_domain_info;
} }
/* Chances are this interrupt is wired PC-style ... */
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? 15 : 14;
}
#endif /* __ASM_SH_PCI_H */ #endif /* __ASM_SH_PCI_H */
...@@ -82,14 +82,6 @@ ...@@ -82,14 +82,6 @@
#define DMA_BURST64 0x40 #define DMA_BURST64 0x40
#define DMA_BURSTBITS 0x7f #define DMA_BURSTBITS 0x7f
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#ifdef CONFIG_SPARC32 #ifdef CONFIG_SPARC32
struct device; struct device;
......
...@@ -40,13 +40,4 @@ static inline int pci_proc_domain(struct pci_bus *bus) ...@@ -40,13 +40,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
#define get_pci_unmapped_area get_fb_unmapped_area #define get_pci_unmapped_area get_fb_unmapped_area
#endif /* CONFIG_SPARC64 */ #endif /* CONFIG_SPARC64 */
#if defined(CONFIG_SPARC64) || defined(CONFIG_LEON_PCI)
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return PCI_IRQ_NONE;
}
#else
#include <asm-generic/pci.h>
#endif
#endif /* ___ASM_SPARC_PCI_H */ #endif /* ___ASM_SPARC_PCI_H */
...@@ -4,28 +4,8 @@ ...@@ -4,28 +4,8 @@
#include <linux/types.h> #include <linux/types.h>
#include <asm/io.h> #include <asm/io.h>
#define PCIBIOS_MIN_IO 0 /* Generic PCI */
#define PCIBIOS_MIN_MEM 0 #include <asm-generic/pci.h>
#define pcibios_assign_all_busses() 1
extern int isa_dma_bridge_buggy;
#ifdef CONFIG_PCI
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
/* no legacy IRQs */
return -ENODEV;
}
#endif
#ifdef CONFIG_PCI_DOMAINS
static inline int pci_proc_domain(struct pci_bus *bus)
{
/* always show the domain in /proc */
return 1;
}
#endif /* CONFIG_PCI */
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
/* /*
......
...@@ -307,12 +307,4 @@ extern int request_dma(unsigned int dmanr, const char *device_id); ...@@ -307,12 +307,4 @@ extern int request_dma(unsigned int dmanr, const char *device_id);
extern void free_dma(unsigned int dmanr); extern void free_dma(unsigned int dmanr);
#endif #endif
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_X86_DMA_H */ #endif /* _ASM_X86_DMA_H */
...@@ -105,9 +105,6 @@ static inline void early_quirks(void) { } ...@@ -105,9 +105,6 @@ static inline void early_quirks(void) { }
extern void pci_iommu_alloc(void); extern void pci_iommu_alloc(void);
/* generic pci stuff */
#include <asm-generic/pci.h>
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
/* Returns the node based on pci bus */ /* Returns the node based on pci bus */
static inline int __pcibus_to_node(const struct pci_bus *bus) static inline int __pcibus_to_node(const struct pci_bus *bus)
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/isa-dma.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <linux/io.h> #include <linux/io.h>
......
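This hunk belongs to the same cleanup as the <asm/dma.h> changes above: the per-architecture isa_dma_bridge_buggy declarations are gone and the flag is now reached through the new <linux/isa-dma.h> header. A usage sketch (the helper name is hypothetical):

#include <linux/isa-dma.h>
#include <linux/printk.h>

static void check_isa_bridge(void)	/* hypothetical helper */
{
	/* Still compiles when CONFIG_PCI is off; the flag then evaluates to 0. */
	if (isa_dma_bridge_buggy)
		pr_warn("applying buggy ISA DMA bridge workaround\n");
}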
...@@ -52,11 +52,4 @@ ...@@ -52,11 +52,4 @@
extern int request_dma(unsigned int dmanr, const char * device_id); extern int request_dma(unsigned int dmanr, const char * device_id);
extern void free_dma(unsigned int dmanr); extern void free_dma(unsigned int dmanr);
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif #endif
...@@ -43,7 +43,4 @@ ...@@ -43,7 +43,4 @@
#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 #define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
#define arch_can_pci_mmap_io() 1 #define arch_can_pci_mmap_io() 1
/* Generic PCI */
#include <asm-generic/pci.h>
#endif /* _XTENSA_PCI_H */ #endif /* _XTENSA_PCI_H */
...@@ -1144,6 +1144,9 @@ static int __init acpi_bus_init_irq(void) ...@@ -1144,6 +1144,9 @@ static int __init acpi_bus_init_irq(void)
case ACPI_IRQ_MODEL_PLATFORM: case ACPI_IRQ_MODEL_PLATFORM:
message = "platform specific model"; message = "platform specific model";
break; break;
case ACPI_IRQ_MODEL_LPIC:
message = "LPIC";
break;
default: default:
pr_info("Unknown interrupt routing model\n"); pr_info("Unknown interrupt routing model\n");
return -ENODEV; return -ENODEV;
......
...@@ -12,7 +12,8 @@ ...@@ -12,7 +12,8 @@
enum acpi_irq_model_id acpi_irq_model; enum acpi_irq_model_id acpi_irq_model;
static struct fwnode_handle *acpi_gsi_domain_id; static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi);
static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi);
/** /**
* acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI * acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI
...@@ -26,14 +27,18 @@ static struct fwnode_handle *acpi_gsi_domain_id; ...@@ -26,14 +27,18 @@ static struct fwnode_handle *acpi_gsi_domain_id;
*/ */
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{ {
struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, struct irq_domain *d;
DOMAIN_BUS_ANY);
d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi),
DOMAIN_BUS_ANY);
*irq = irq_find_mapping(d, gsi); *irq = irq_find_mapping(d, gsi);
/* /*
* *irq == 0 means no mapping, that should * *irq == 0 means no mapping, that should be reported as a
* be reported as a failure * failure, unless there is an arch-specific fallback handler.
*/ */
if (!*irq && acpi_gsi_to_irq_fallback)
*irq = acpi_gsi_to_irq_fallback(gsi);
return (*irq > 0) ? 0 : -EINVAL; return (*irq > 0) ? 0 : -EINVAL;
} }
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
...@@ -53,12 +58,12 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, ...@@ -53,12 +58,12 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
{ {
struct irq_fwspec fwspec; struct irq_fwspec fwspec;
if (WARN_ON(!acpi_gsi_domain_id)) { fwspec.fwnode = acpi_get_gsi_domain_id(gsi);
if (WARN_ON(!fwspec.fwnode)) {
pr_warn("GSI: No registered irqchip, giving up\n"); pr_warn("GSI: No registered irqchip, giving up\n");
return -EINVAL; return -EINVAL;
} }
fwspec.fwnode = acpi_gsi_domain_id;
fwspec.param[0] = gsi; fwspec.param[0] = gsi;
fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity); fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
fwspec.param_count = 2; fwspec.param_count = 2;
...@@ -73,13 +78,14 @@ EXPORT_SYMBOL_GPL(acpi_register_gsi); ...@@ -73,13 +78,14 @@ EXPORT_SYMBOL_GPL(acpi_register_gsi);
*/ */
void acpi_unregister_gsi(u32 gsi) void acpi_unregister_gsi(u32 gsi)
{ {
struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, struct irq_domain *d;
DOMAIN_BUS_ANY);
int irq; int irq;
if (WARN_ON(acpi_irq_model == ACPI_IRQ_MODEL_GIC && gsi < 16)) if (WARN_ON(acpi_irq_model == ACPI_IRQ_MODEL_GIC && gsi < 16))
return; return;
d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi),
DOMAIN_BUS_ANY);
irq = irq_find_mapping(d, gsi); irq = irq_find_mapping(d, gsi);
irq_dispose_mapping(irq); irq_dispose_mapping(irq);
} }
...@@ -97,7 +103,8 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi); ...@@ -97,7 +103,8 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
* The referenced device fwhandle or NULL on failure * The referenced device fwhandle or NULL on failure
*/ */
static struct fwnode_handle * static struct fwnode_handle *
acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source,
u32 gsi)
{ {
struct fwnode_handle *result; struct fwnode_handle *result;
struct acpi_device *device; struct acpi_device *device;
...@@ -105,7 +112,7 @@ acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source) ...@@ -105,7 +112,7 @@ acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source)
acpi_status status; acpi_status status;
if (!source->string_length) if (!source->string_length)
return acpi_gsi_domain_id; return acpi_get_gsi_domain_id(gsi);
status = acpi_get_handle(NULL, source->string_ptr, &handle); status = acpi_get_handle(NULL, source->string_ptr, &handle);
if (WARN_ON(ACPI_FAILURE(status))) if (WARN_ON(ACPI_FAILURE(status)))
...@@ -194,7 +201,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares, ...@@ -194,7 +201,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
ctx->index -= irq->interrupt_count; ctx->index -= irq->interrupt_count;
return AE_OK; return AE_OK;
} }
fwnode = acpi_gsi_domain_id; fwnode = acpi_get_gsi_domain_id(irq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index], acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index],
irq->triggering, irq->polarity, irq->triggering, irq->polarity,
irq->shareable, ctx); irq->shareable, ctx);
...@@ -207,7 +214,8 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares, ...@@ -207,7 +214,8 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
ctx->index -= eirq->interrupt_count; ctx->index -= eirq->interrupt_count;
return AE_OK; return AE_OK;
} }
fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source); fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source,
eirq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index], acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index],
eirq->triggering, eirq->polarity, eirq->triggering, eirq->polarity,
eirq->shareable, ctx); eirq->shareable, ctx);
...@@ -291,10 +299,20 @@ EXPORT_SYMBOL_GPL(acpi_irq_get); ...@@ -291,10 +299,20 @@ EXPORT_SYMBOL_GPL(acpi_irq_get);
* GSI interrupts * GSI interrupts
*/ */
void __init acpi_set_irq_model(enum acpi_irq_model_id model, void __init acpi_set_irq_model(enum acpi_irq_model_id model,
struct fwnode_handle *fwnode) struct fwnode_handle *(*fn)(u32))
{ {
acpi_irq_model = model; acpi_irq_model = model;
acpi_gsi_domain_id = fwnode; acpi_get_gsi_domain_id = fn;
}
/**
* acpi_set_gsi_to_irq_fallback - Register a GSI-to-IRQ transfer fallback
* callback that defers to an arch-specific implementation.
* @fn: arch-specific fallback handler
*/
void __init acpi_set_gsi_to_irq_fallback(u32 (*fn)(u32))
{
acpi_gsi_to_irq_fallback = fn;
} }
/** /**
...@@ -312,8 +330,14 @@ struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, ...@@ -312,8 +330,14 @@ struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
const struct irq_domain_ops *ops, const struct irq_domain_ops *ops,
void *host_data) void *host_data)
{ {
struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id, struct irq_domain *d;
DOMAIN_BUS_ANY);
/* This only works for the GIC model... */
if (acpi_irq_model != ACPI_IRQ_MODEL_GIC)
return NULL;
d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(0),
DOMAIN_BUS_ANY);
if (!d) if (!d)
return NULL; return NULL;
......
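With acpi_set_irq_model() now taking a per-GSI callback rather than a single fwnode handle, an irqchip whose one domain covers every GSI keeps the old behaviour by returning the same handle unconditionally, while platforms with several GSI ranges (like LoongArch) can route each range to a different domain. A minimal sketch of the single-domain case (my_gsi_handle and the function names are hypothetical; the GIC changes below do essentially this):

#include <linux/acpi.h>

static struct fwnode_handle *my_gsi_handle;

static struct fwnode_handle *my_get_gsi_domain_id(u32 gsi)
{
	return my_gsi_handle;		/* one domain covers all GSIs */
}

static void __init my_register_gsi_model(void)
{
	acpi_set_irq_model(ACPI_IRQ_MODEL_PLATFORM, my_get_gsi_domain_id);
}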
...@@ -41,6 +41,8 @@ struct mcfg_fixup { ...@@ -41,6 +41,8 @@ struct mcfg_fixup {
static struct mcfg_fixup mcfg_quirks[] = { static struct mcfg_fixup mcfg_quirks[] = {
/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */ /* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
#ifdef CONFIG_ARM64
#define AL_ECAM(table_id, rev, seg, ops) \ #define AL_ECAM(table_id, rev, seg, ops) \
{ "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops } { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
...@@ -169,6 +171,17 @@ static struct mcfg_fixup mcfg_quirks[] = { ...@@ -169,6 +171,17 @@ static struct mcfg_fixup mcfg_quirks[] = {
ALTRA_ECAM_QUIRK(1, 13), ALTRA_ECAM_QUIRK(1, 13),
ALTRA_ECAM_QUIRK(1, 14), ALTRA_ECAM_QUIRK(1, 14),
ALTRA_ECAM_QUIRK(1, 15), ALTRA_ECAM_QUIRK(1, 15),
#endif /* ARM64 */
#ifdef CONFIG_LOONGARCH
#define LOONGSON_ECAM_MCFG(table_id, seg) \
{ "LOONGS", table_id, 1, seg, MCFG_BUS_ANY, &loongson_pci_ecam_ops }
LOONGSON_ECAM_MCFG("\0", 0),
LOONGSON_ECAM_MCFG("LOONGSON", 0),
LOONGSON_ECAM_MCFG("\0", 1),
LOONGSON_ECAM_MCFG("LOONGSON", 1),
#endif /* LOONGARCH */
}; };
static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
......
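For context, each mcfg_quirks[] entry is matched against the firmware's MCFG header before its ECAM ops are substituted. A simplified, illustrative matcher, assuming an entry carries the OEM id, OEM table id and segment shown in the table comment above (this is not the kernel's actual helper, and the real logic also checks the bus range and OEM revision):

#include <linux/acpi.h>
#include <linux/string.h>
#include <linux/types.h>

struct mcfg_fixup_example {		/* hypothetical mirror of a quirk entry */
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u16 segment;
};

static bool mcfg_quirk_matches(const struct mcfg_fixup_example *f,
			       const struct acpi_table_header *hdr, u16 segment)
{
	return !memcmp(f->oem_id, hdr->oem_id, ACPI_OEM_ID_SIZE) &&
	       !memcmp(f->oem_table_id, hdr->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
	       f->segment == segment;
}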
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <asm/dma.h> #include <linux/isa-dma.h>
#include <linux/comedi/comedidev.h> #include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_isadma.h> #include <linux/comedi/comedi_isadma.h>
......
...@@ -546,6 +546,16 @@ config EXYNOS_IRQ_COMBINER ...@@ -546,6 +546,16 @@ config EXYNOS_IRQ_COMBINER
Say yes here to add support for the IRQ combiner devices embedded Say yes here to add support for the IRQ combiner devices embedded
in Samsung Exynos chips. in Samsung Exynos chips.
config IRQ_LOONGARCH_CPU
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
help
Support for the LoongArch CPU Interrupt Controller. For details of
irq chip hierarchy on LoongArch platforms please read the document
Documentation/loongarch/irq-chip-model.rst.
config LOONGSON_LIOINTC config LOONGSON_LIOINTC
bool "Loongson Local I/O Interrupt Controller" bool "Loongson Local I/O Interrupt Controller"
depends on MACH_LOONGSON64 depends on MACH_LOONGSON64
...@@ -555,6 +565,16 @@ config LOONGSON_LIOINTC ...@@ -555,6 +565,16 @@ config LOONGSON_LIOINTC
help help
Support for the Loongson Local I/O Interrupt Controller. Support for the Loongson Local I/O Interrupt Controller.
config LOONGSON_EIOINTC
bool "Loongson Extend I/O Interrupt Controller"
depends on LOONGARCH
depends on MACH_LOONGSON64
default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_CHIP
help
Support for the Loongson3 Extend I/O Interrupt Vector Controller.
config LOONGSON_HTPIC config LOONGSON_HTPIC
bool "Loongson3 HyperTransport PIC Controller" bool "Loongson3 HyperTransport PIC Controller"
depends on MACH_LOONGSON64 && MIPS depends on MACH_LOONGSON64 && MIPS
...@@ -574,7 +594,7 @@ config LOONGSON_HTVEC ...@@ -574,7 +594,7 @@ config LOONGSON_HTVEC
config LOONGSON_PCH_PIC config LOONGSON_PCH_PIC
bool "Loongson PCH PIC Controller" bool "Loongson PCH PIC Controller"
depends on MACH_LOONGSON64 || COMPILE_TEST depends on MACH_LOONGSON64
default MACH_LOONGSON64 default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY select IRQ_DOMAIN_HIERARCHY
select IRQ_FASTEOI_HIERARCHY_HANDLERS select IRQ_FASTEOI_HIERARCHY_HANDLERS
...@@ -583,7 +603,7 @@ config LOONGSON_PCH_PIC ...@@ -583,7 +603,7 @@ config LOONGSON_PCH_PIC
config LOONGSON_PCH_MSI config LOONGSON_PCH_MSI
bool "Loongson PCH MSI Controller" bool "Loongson PCH MSI Controller"
depends on MACH_LOONGSON64 || COMPILE_TEST depends on MACH_LOONGSON64
depends on PCI depends on PCI
default MACH_LOONGSON64 default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY select IRQ_DOMAIN_HIERARCHY
...@@ -591,6 +611,14 @@ config LOONGSON_PCH_MSI ...@@ -591,6 +611,14 @@ config LOONGSON_PCH_MSI
help help
Support for the Loongson PCH MSI Controller. Support for the Loongson PCH MSI Controller.
config LOONGSON_PCH_LPC
bool "Loongson PCH LPC Controller"
depends on MACH_LOONGSON64
default (MACH_LOONGSON64 && LOONGARCH)
select IRQ_DOMAIN_HIERARCHY
help
Support for the Loongson PCH LPC Controller.
config MST_IRQ config MST_IRQ
bool "MStar Interrupt Controller" bool "MStar Interrupt Controller"
depends on ARCH_MEDIATEK || ARCH_MSTARV7 || COMPILE_TEST depends on ARCH_MEDIATEK || ARCH_MSTARV7 || COMPILE_TEST
......
...@@ -103,11 +103,14 @@ obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o ...@@ -103,11 +103,14 @@ obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o
obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o
obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o
obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o
obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o
obj-$(CONFIG_LOONGSON_HTVEC) += irq-loongson-htvec.o obj-$(CONFIG_LOONGSON_HTVEC) += irq-loongson-htvec.o
obj-$(CONFIG_LOONGSON_PCH_PIC) += irq-loongson-pch-pic.o obj-$(CONFIG_LOONGSON_PCH_PIC) += irq-loongson-pch-pic.o
obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o
obj-$(CONFIG_LOONGSON_PCH_LPC) += irq-loongson-pch-lpc.o
obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o
obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o
obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o
......
...@@ -2381,11 +2381,17 @@ static void __init gic_acpi_setup_kvm_info(void) ...@@ -2381,11 +2381,17 @@ static void __init gic_acpi_setup_kvm_info(void)
vgic_set_kvm_info(&gic_v3_kvm_info); vgic_set_kvm_info(&gic_v3_kvm_info);
} }
static struct fwnode_handle *gsi_domain_handle;
static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
{
return gsi_domain_handle;
}
static int __init static int __init
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{ {
struct acpi_madt_generic_distributor *dist; struct acpi_madt_generic_distributor *dist;
struct fwnode_handle *domain_handle;
size_t size; size_t size;
int i, err; int i, err;
...@@ -2417,18 +2423,18 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) ...@@ -2417,18 +2423,18 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
if (err) if (err)
goto out_redist_unmap; goto out_redist_unmap;
domain_handle = irq_domain_alloc_fwnode(&dist->base_address); gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
if (!domain_handle) { if (!gsi_domain_handle) {
err = -ENOMEM; err = -ENOMEM;
goto out_redist_unmap; goto out_redist_unmap;
} }
err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs, err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
acpi_data.nr_redist_regions, 0, domain_handle); acpi_data.nr_redist_regions, 0, gsi_domain_handle);
if (err) if (err)
goto out_fwhandle_free; goto out_fwhandle_free;
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);
if (static_branch_likely(&supports_deactivate_key)) if (static_branch_likely(&supports_deactivate_key))
gic_acpi_setup_kvm_info(); gic_acpi_setup_kvm_info();
...@@ -2436,7 +2442,7 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) ...@@ -2436,7 +2442,7 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
return 0; return 0;
out_fwhandle_free: out_fwhandle_free:
irq_domain_free_fwnode(domain_handle); irq_domain_free_fwnode(gsi_domain_handle);
out_redist_unmap: out_redist_unmap:
for (i = 0; i < acpi_data.nr_redist_regions; i++) for (i = 0; i < acpi_data.nr_redist_regions; i++)
if (acpi_data.redist_regs[i].redist_base) if (acpi_data.redist_regs[i].redist_base)
......
...@@ -1682,11 +1682,17 @@ static void __init gic_acpi_setup_kvm_info(void) ...@@ -1682,11 +1682,17 @@ static void __init gic_acpi_setup_kvm_info(void)
vgic_set_kvm_info(&gic_v2_kvm_info); vgic_set_kvm_info(&gic_v2_kvm_info);
} }
static struct fwnode_handle *gsi_domain_handle;
static struct fwnode_handle *gic_v2_get_gsi_domain_id(u32 gsi)
{
return gsi_domain_handle;
}
static int __init gic_v2_acpi_init(union acpi_subtable_headers *header, static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
const unsigned long end) const unsigned long end)
{ {
struct acpi_madt_generic_distributor *dist; struct acpi_madt_generic_distributor *dist;
struct fwnode_handle *domain_handle;
struct gic_chip_data *gic = &gic_data[0]; struct gic_chip_data *gic = &gic_data[0];
int count, ret; int count, ret;
...@@ -1724,22 +1730,22 @@ static int __init gic_v2_acpi_init(union acpi_subtable_headers *header, ...@@ -1724,22 +1730,22 @@ static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
/* /*
* Initialize GIC instance zero (no multi-GIC support). * Initialize GIC instance zero (no multi-GIC support).
*/ */
domain_handle = irq_domain_alloc_fwnode(&dist->base_address); gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
if (!domain_handle) { if (!gsi_domain_handle) {
pr_err("Unable to allocate domain handle\n"); pr_err("Unable to allocate domain handle\n");
gic_teardown(gic); gic_teardown(gic);
return -ENOMEM; return -ENOMEM;
} }
ret = __gic_init_bases(gic, domain_handle); ret = __gic_init_bases(gic, gsi_domain_handle);
if (ret) { if (ret) {
pr_err("Failed to initialise GIC\n"); pr_err("Failed to initialise GIC\n");
irq_domain_free_fwnode(domain_handle); irq_domain_free_fwnode(gsi_domain_handle);
gic_teardown(gic); gic_teardown(gic);
return ret; return ret;
} }
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v2_get_gsi_domain_id);
if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
gicv2m_init(NULL, gic_data[0].domain); gicv2m_init(NULL, gic_data[0].domain);
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
static struct irq_domain *irq_domain;
struct fwnode_handle *cpuintc_handle;
static u32 lpic_gsi_to_irq(u32 gsi)
{
/* Only GSIs in the PCH range need this GSI-to-IRQ fallback on LoongArch. */
if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
return 0;
}
static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
{
int id;
struct fwnode_handle *domain_handle = NULL;
switch (gsi) {
case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ:
if (liointc_handle)
domain_handle = liointc_handle;
break;
case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ:
if (pch_lpc_handle)
domain_handle = pch_lpc_handle;
break;
case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ:
id = find_pch_pic(gsi);
if (id >= 0 && pch_pic_handle[id])
domain_handle = pch_pic_handle[id];
break;
}
return domain_handle;
}
static void mask_loongarch_irq(struct irq_data *d)
{
clear_csr_ecfg(ECFGF(d->hwirq));
}
static void unmask_loongarch_irq(struct irq_data *d)
{
set_csr_ecfg(ECFGF(d->hwirq));
}
static struct irq_chip cpu_irq_controller = {
.name = "CPUINTC",
.irq_mask = mask_loongarch_irq,
.irq_unmask = unmask_loongarch_irq,
};
static void handle_cpu_irq(struct pt_regs *regs)
{
int hwirq;
unsigned int estat = read_csr_estat() & CSR_ESTAT_IS;
while ((hwirq = ffs(estat))) {
estat &= ~BIT(hwirq - 1);
generic_handle_domain_irq(irq_domain, hwirq - 1);
}
}
static int loongarch_cpu_intc_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_noprobe(irq);
irq_set_chip_and_handler(irq, &cpu_irq_controller, handle_percpu_irq);
return 0;
}
static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = {
.map = loongarch_cpu_intc_map,
.xlate = irq_domain_xlate_onecell,
};
static int __init
liointc_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_lio_pic *liointc_entry = (struct acpi_madt_lio_pic *)header;
return liointc_acpi_init(irq_domain, liointc_entry);
}
static int __init
eiointc_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_eio_pic *eiointc_entry = (struct acpi_madt_eio_pic *)header;
return eiointc_acpi_init(irq_domain, eiointc_entry);
}
static int __init acpi_cascade_irqdomain_init(void)
{
acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC,
liointc_parse_madt, 0);
acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
eiointc_parse_madt, 0);
return 0;
}
static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
const unsigned long end)
{
if (irq_domain)
return 0;
/* Mask interrupts. */
clear_csr_ecfg(ECFG0_IM);
clear_csr_estat(ESTATF_IP);
cpuintc_handle = irq_domain_alloc_fwnode(NULL);
irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
&loongarch_cpu_intc_irq_domain_ops, NULL);
if (!irq_domain)
panic("Failed to add irqdomain for LoongArch CPU");
set_handle_irq(&handle_cpu_irq);
acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, lpic_get_gsi_domain_id);
acpi_set_gsi_to_irq_fallback(lpic_gsi_to_irq);
acpi_cascade_irqdomain_init();
return 0;
}
IRQCHIP_ACPI_DECLARE(cpuintc_v1, ACPI_MADT_TYPE_CORE_PIC,
NULL, ACPI_MADT_CORE_PIC_VERSION_V1, cpuintc_acpi_init);
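A brief worked note on the dispatch loop in handle_cpu_irq() above:

/*
 * ffs() is 1-based, so ESTAT bit n becomes hwirq n: with only the timer
 * interrupt pending (bit 11 on LoongArch, i.e. EXCCODE_TIMER -
 * EXCCODE_INT_START), ffs(estat) returns 12, that bit is cleared, and
 * generic_handle_domain_irq() runs the handler registered for hwirq 11.
 */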
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
#endif #endif
#define LIOINTC_CHIP_IRQ 32 #define LIOINTC_CHIP_IRQ 32
#define LIOINTC_NUM_PARENT 4 #define LIOINTC_NUM_PARENT 4
#define LIOINTC_NUM_CORES 4 #define LIOINTC_NUM_CORES 4
#define LIOINTC_INTC_CHIP_START 0x20 #define LIOINTC_INTC_CHIP_START 0x20
...@@ -58,6 +58,8 @@ struct liointc_priv { ...@@ -58,6 +58,8 @@ struct liointc_priv {
bool has_lpc_irq_errata; bool has_lpc_irq_errata;
}; };
struct fwnode_handle *liointc_handle;
static void liointc_chained_handle_irq(struct irq_desc *desc) static void liointc_chained_handle_irq(struct irq_desc *desc)
{ {
struct liointc_handler_data *handler = irq_desc_get_handler_data(desc); struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
...@@ -153,97 +155,79 @@ static void liointc_resume(struct irq_chip_generic *gc) ...@@ -153,97 +155,79 @@ static void liointc_resume(struct irq_chip_generic *gc)
irq_gc_unlock_irqrestore(gc, flags); irq_gc_unlock_irqrestore(gc, flags);
} }
static const char * const parent_names[] = {"int0", "int1", "int2", "int3"}; static int parent_irq[LIOINTC_NUM_PARENT];
static const char * const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"}; static u32 parent_int_map[LIOINTC_NUM_PARENT];
static const char *const parent_names[] = {"int0", "int1", "int2", "int3"};
static const char *const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"};
static void __iomem *liointc_get_reg_byname(struct device_node *node, static int liointc_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
const char *name) const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{ {
int index = of_property_match_string(node, "reg-names", name); if (WARN_ON(intsize < 1))
return -EINVAL;
if (index < 0) *out_hwirq = intspec[0] - GSI_MIN_CPU_IRQ;
return NULL; *out_type = IRQ_TYPE_NONE;
return 0;
return of_iomap(node, index);
} }
static const struct irq_domain_ops acpi_irq_gc_ops = {
.map = irq_map_generic_chip,
.unmap = irq_unmap_generic_chip,
.xlate = liointc_domain_xlate,
};

static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
struct fwnode_handle *domain_handle, struct device_node *node)
{
int i, err;
void __iomem *base;
struct irq_chip_type *ct;
struct irq_chip_generic *gc;
struct irq_domain *domain;
struct liointc_priv *priv;

priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;

base = ioremap(addr, size);
if (!base)
goto out_free_priv;

for (i = 0; i < LIOINTC_NUM_CORES; i++)
priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;

for (i = 0; i < LIOINTC_NUM_PARENT; i++)
priv->handler[i].parent_int_map = parent_int_map[i];

if (revision > 1) {
for (i = 0; i < LIOINTC_NUM_CORES; i++) {
int index = of_property_match_string(node, "reg-names", core_reg_names[i]);

if (index < 0)
return -EINVAL;

priv->core_isr[i] = of_iomap(node, index);
}
}

/* Setup IRQ domain */
if (!acpi_disabled)
domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
&acpi_irq_gc_ops, priv);
else
domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
&irq_generic_chip_ops, priv);
if (!domain) {
pr_err("loongson-liointc: cannot add IRQ domain\n");
goto out_iounmap;
}

err = irq_alloc_domain_generic_chips(domain, LIOINTC_CHIP_IRQ, 1,
(node ? node->full_name : "LIOINTC"),
handle_level_irq, 0, IRQ_NOPROBE, 0);
if (err) {
pr_err("loongson-liointc: unable to register IRQ domain\n");
goto out_free_domain;
}
...@@ -299,24 +283,93 @@ static int __init liointc_of_init(struct device_node *node,
liointc_chained_handle_irq, &priv->handler[i]);
}

liointc_handle = domain_handle;
return 0;

out_free_domain:
irq_domain_remove(domain);
out_iounmap:
iounmap(base);
out_free_priv:
kfree(priv);

return -EINVAL;
}
#ifdef CONFIG_OF
static int __init liointc_of_init(struct device_node *node,
struct device_node *parent)
{
bool have_parent = FALSE;
int sz, i, index, revision, err = 0;
struct resource res;
if (!of_device_is_compatible(node, "loongson,liointc-2.0")) {
index = 0;
revision = 1;
} else {
index = of_property_match_string(node, "reg-names", "main");
revision = 2;
}
if (of_address_to_resource(node, index, &res))
return -EINVAL;
for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
parent_irq[i] = of_irq_get_byname(node, parent_names[i]);
if (parent_irq[i] > 0)
have_parent = TRUE;
}
if (!have_parent)
return -ENODEV;
sz = of_property_read_variable_u32_array(node,
"loongson,parent_int_map",
&parent_int_map[0],
LIOINTC_NUM_PARENT,
LIOINTC_NUM_PARENT);
if (sz < 4) {
pr_err("loongson-liointc: No parent_int_map\n");
return -ENODEV;
}
err = liointc_init(res.start, resource_size(&res),
revision, of_node_to_fwnode(node), node);
if (err < 0)
return err;
return 0;
} }
IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init); IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init);
IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init); IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init);
IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init); IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init);
#endif
#ifdef CONFIG_ACPI
int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic *acpi_liointc)
{
int ret;
struct fwnode_handle *domain_handle;
parent_int_map[0] = acpi_liointc->cascade_map[0];
parent_int_map[1] = acpi_liointc->cascade_map[1];
parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);
domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_liointc);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
return -ENOMEM;
}
ret = liointc_init(acpi_liointc->address, acpi_liointc->size,
1, domain_handle, NULL);
if (ret)
irq_domain_free_fwnode(domain_handle);
return ret;
}
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* Loongson LPC Interrupt Controller support
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#define pr_fmt(fmt) "lpc: " fmt
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
/* Registers */
#define LPC_INT_CTL 0x00
#define LPC_INT_ENA 0x04
#define LPC_INT_STS 0x08
#define LPC_INT_CLR 0x0c
#define LPC_INT_POL 0x10
#define LPC_COUNT 16
/* LPC_INT_CTL */
#define LPC_INT_CTL_EN BIT(31)
struct pch_lpc {
void __iomem *base;
struct irq_domain *lpc_domain;
raw_spinlock_t lpc_lock;
u32 saved_reg_ctl;
u32 saved_reg_ena;
u32 saved_reg_pol;
};
struct fwnode_handle *pch_lpc_handle;
static void lpc_irq_ack(struct irq_data *d)
{
unsigned long flags;
struct pch_lpc *priv = d->domain->host_data;
raw_spin_lock_irqsave(&priv->lpc_lock, flags);
writel(0x1 << d->hwirq, priv->base + LPC_INT_CLR);
raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
}
static void lpc_irq_mask(struct irq_data *d)
{
unsigned long flags;
struct pch_lpc *priv = d->domain->host_data;
raw_spin_lock_irqsave(&priv->lpc_lock, flags);
writel(readl(priv->base + LPC_INT_ENA) & (~(0x1 << (d->hwirq))),
priv->base + LPC_INT_ENA);
raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
}
static void lpc_irq_unmask(struct irq_data *d)
{
unsigned long flags;
struct pch_lpc *priv = d->domain->host_data;
raw_spin_lock_irqsave(&priv->lpc_lock, flags);
writel(readl(priv->base + LPC_INT_ENA) | (0x1 << (d->hwirq)),
priv->base + LPC_INT_ENA);
raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
}
static int lpc_irq_set_type(struct irq_data *d, unsigned int type)
{
u32 val;
u32 mask = 0x1 << (d->hwirq);
struct pch_lpc *priv = d->domain->host_data;
if (!(type & IRQ_TYPE_LEVEL_MASK))
return 0;
val = readl(priv->base + LPC_INT_POL);
if (type == IRQ_TYPE_LEVEL_HIGH)
val |= mask;
else
val &= ~mask;
writel(val, priv->base + LPC_INT_POL);
return 0;
}
static const struct irq_chip pch_lpc_irq_chip = {
.name = "PCH LPC",
.irq_mask = lpc_irq_mask,
.irq_unmask = lpc_irq_unmask,
.irq_ack = lpc_irq_ack,
.irq_set_type = lpc_irq_set_type,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
static void lpc_irq_dispatch(struct irq_desc *desc)
{
u32 pending, bit;
struct irq_chip *chip = irq_desc_get_chip(desc);
struct pch_lpc *priv = irq_desc_get_handler_data(desc);
chained_irq_enter(chip, desc);
pending = readl(priv->base + LPC_INT_ENA);
pending &= readl(priv->base + LPC_INT_STS);
if (!pending)
spurious_interrupt();
while (pending) {
bit = __ffs(pending);
generic_handle_domain_irq(priv->lpc_domain, bit);
pending &= ~BIT(bit);
}
chained_irq_exit(chip, desc);
}
static int pch_lpc_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
irq_set_chip_and_handler(irq, &pch_lpc_irq_chip, handle_level_irq);
return 0;
}
static const struct irq_domain_ops pch_lpc_domain_ops = {
.map = pch_lpc_map,
.translate = irq_domain_translate_twocell,
};
static void pch_lpc_reset(struct pch_lpc *priv)
{
/* Enable the LPC interrupt, bit31: en bit30: edge */
writel(LPC_INT_CTL_EN, priv->base + LPC_INT_CTL);
writel(0, priv->base + LPC_INT_ENA);
/* Clear all 18-bit interrupt bits */
writel(GENMASK(17, 0), priv->base + LPC_INT_CLR);
}
static int pch_lpc_disabled(struct pch_lpc *priv)
{
return (readl(priv->base + LPC_INT_ENA) == 0xffffffff) &&
(readl(priv->base + LPC_INT_STS) == 0xffffffff);
}
int __init pch_lpc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lpc_pic *acpi_pchlpc)
{
int parent_irq;
struct pch_lpc *priv;
struct irq_fwspec fwspec;
struct fwnode_handle *irq_handle;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
raw_spin_lock_init(&priv->lpc_lock);
priv->base = ioremap(acpi_pchlpc->address, acpi_pchlpc->size);
if (!priv->base)
goto free_priv;
if (pch_lpc_disabled(priv)) {
pr_err("Failed to get LPC status\n");
goto iounmap_base;
}
irq_handle = irq_domain_alloc_named_fwnode("lpcintc");
if (!irq_handle) {
pr_err("Unable to allocate domain handle\n");
goto iounmap_base;
}
priv->lpc_domain = irq_domain_create_linear(irq_handle, LPC_COUNT,
&pch_lpc_domain_ops, priv);
if (!priv->lpc_domain) {
pr_err("Failed to create IRQ domain\n");
goto free_irq_handle;
}
pch_lpc_reset(priv);
fwspec.fwnode = parent->fwnode;
fwspec.param[0] = acpi_pchlpc->cascade + GSI_MIN_PCH_IRQ;
fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
fwspec.param_count = 2;
parent_irq = irq_create_fwspec_mapping(&fwspec);
irq_set_chained_handler_and_data(parent_irq, lpc_irq_dispatch, priv);
pch_lpc_handle = irq_handle;
return 0;
free_irq_handle:
irq_domain_free_fwnode(irq_handle);
iounmap_base:
iounmap(priv->base);
free_priv:
kfree(priv);
return -ENOMEM;
}
...@@ -15,6 +15,8 @@ ...@@ -15,6 +15,8 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/slab.h> #include <linux/slab.h>
static int nr_pics;
struct pch_msi_data { struct pch_msi_data {
struct mutex msi_map_lock; struct mutex msi_map_lock;
phys_addr_t doorbell; phys_addr_t doorbell;
...@@ -23,6 +25,8 @@ struct pch_msi_data { ...@@ -23,6 +25,8 @@ struct pch_msi_data {
unsigned long *msi_map; unsigned long *msi_map;
}; };
static struct fwnode_handle *pch_msi_handle[MAX_IO_PICS];
static void pch_msi_mask_msi_irq(struct irq_data *d) static void pch_msi_mask_msi_irq(struct irq_data *d)
{ {
pci_msi_mask_irq(d); pci_msi_mask_irq(d);
...@@ -154,12 +158,12 @@ static const struct irq_domain_ops pch_msi_middle_domain_ops = { ...@@ -154,12 +158,12 @@ static const struct irq_domain_ops pch_msi_middle_domain_ops = {
}; };
static int pch_msi_init_domains(struct pch_msi_data *priv,
struct irq_domain *parent,
struct fwnode_handle *domain_handle)
{
struct irq_domain *middle_domain, *msi_domain;

middle_domain = irq_domain_create_linear(domain_handle,
priv->num_irqs,
&pch_msi_middle_domain_ops,
priv);
...@@ -171,7 +175,7 @@ static int pch_msi_init_domains(struct pch_msi_data *priv, ...@@ -171,7 +175,7 @@ static int pch_msi_init_domains(struct pch_msi_data *priv,
middle_domain->parent = parent; middle_domain->parent = parent;
irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS); irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
msi_domain = pci_msi_create_irq_domain(domain_handle,
&pch_msi_domain_info,
middle_domain);
if (!msi_domain) { if (!msi_domain) {
...@@ -183,19 +187,11 @@ static int pch_msi_init_domains(struct pch_msi_data *priv, ...@@ -183,19 +187,11 @@ static int pch_msi_init_domains(struct pch_msi_data *priv,
return 0; return 0;
} }
static int pch_msi_init(phys_addr_t msg_address, int irq_base, int irq_count,
struct irq_domain *parent_domain, struct fwnode_handle *domain_handle)
{
int ret;
struct pch_msi_data *priv;

priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
...@@ -203,48 +199,95 @@ static int pch_msi_init(struct device_node *node,
mutex_init(&priv->msi_map_lock);

priv->doorbell = msg_address;
priv->irq_first = irq_base;
priv->num_irqs = irq_count;

priv->msi_map = bitmap_zalloc(priv->num_irqs, GFP_KERNEL);
if (!priv->msi_map)
goto err_priv;

pr_debug("Registering %d MSIs, starting at %d\n",
priv->num_irqs, priv->irq_first);

ret = pch_msi_init_domains(priv, parent_domain, domain_handle);
if (ret)
goto err_map;

pch_msi_handle[nr_pics++] = domain_handle;
return 0;

err_map:
bitmap_free(priv->msi_map);
err_priv:
kfree(priv);

return -EINVAL;
}
#ifdef CONFIG_OF
static int pch_msi_of_init(struct device_node *node, struct device_node *parent)
{
int err;
int irq_base, irq_count;
struct resource res;
struct irq_domain *parent_domain;
parent_domain = irq_find_host(parent);
if (!parent_domain) {
pr_err("Failed to find the parent domain\n");
return -ENXIO;
}
if (of_address_to_resource(node, 0, &res)) {
pr_err("Failed to allocate resource\n");
return -EINVAL;
}
if (of_property_read_u32(node, "loongson,msi-base-vec", &irq_base)) {
pr_err("Unable to parse MSI vec base\n");
return -EINVAL;
}
if (of_property_read_u32(node, "loongson,msi-num-vecs", &irq_count)) {
pr_err("Unable to parse MSI vec number\n");
return -EINVAL;
}
err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_node_to_fwnode(node));
if (err < 0)
return err;
return 0;
} }
IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init);
#endif
#ifdef CONFIG_ACPI
struct fwnode_handle *get_pch_msi_handle(int pci_segment)
{
int i;
for (i = 0; i < MAX_IO_PICS; i++) {
if (msi_group[i].pci_segment == pci_segment)
return pch_msi_handle[i];
}
return NULL;
}
int __init pch_msi_acpi_init(struct irq_domain *parent,
struct acpi_madt_msi_pic *acpi_pchmsi)
{
int ret;
struct fwnode_handle *domain_handle;
domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchmsi);
ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start,
acpi_pchmsi->count, parent, domain_handle);
if (ret < 0)
irq_domain_free_fwnode(domain_handle);
return ret;
}
#endif
...@@ -33,13 +33,40 @@ ...@@ -33,13 +33,40 @@
#define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG) #define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG)
#define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG) #define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG)
static int nr_pics;
struct pch_pic { struct pch_pic {
void __iomem *base; void __iomem *base;
struct irq_domain *pic_domain; struct irq_domain *pic_domain;
u32 ht_vec_base; u32 ht_vec_base;
raw_spinlock_t pic_lock; raw_spinlock_t pic_lock;
u32 vec_count;
u32 gsi_base;
}; };
static struct pch_pic *pch_pic_priv[MAX_IO_PICS];
struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
int find_pch_pic(u32 gsi)
{
int i;
/* Find the PCH_PIC that manages this GSI. */
for (i = 0; i < MAX_IO_PICS; i++) {
struct pch_pic *priv = pch_pic_priv[i];
if (!priv)
return -1;
if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
return i;
}
pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
return -1;
}
static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit) static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
{ {
u32 reg; u32 reg;
...@@ -139,6 +166,28 @@ static struct irq_chip pch_pic_irq_chip = { ...@@ -139,6 +166,28 @@ static struct irq_chip pch_pic_irq_chip = {
.irq_set_type = pch_pic_set_type, .irq_set_type = pch_pic_set_type,
}; };
static int pch_pic_domain_translate(struct irq_domain *d,
struct irq_fwspec *fwspec,
unsigned long *hwirq,
unsigned int *type)
{
struct pch_pic *priv = d->host_data;
struct device_node *of_node = to_of_node(fwspec->fwnode);
if (fwspec->param_count < 1)
return -EINVAL;
if (of_node) {
*hwirq = fwspec->param[0] + priv->ht_vec_base;
*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
} else {
*hwirq = fwspec->param[0] - priv->gsi_base;
*type = IRQ_TYPE_NONE;
}
return 0;
}
static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg) unsigned int nr_irqs, void *arg)
{ {
...@@ -149,13 +198,13 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, ...@@ -149,13 +198,13 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
struct irq_fwspec parent_fwspec; struct irq_fwspec parent_fwspec;
struct pch_pic *priv = domain->host_data; struct pch_pic *priv = domain->host_data;
err = pch_pic_domain_translate(domain, fwspec, &hwirq, &type);
if (err) if (err)
return err; return err;
parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 1; parent_fwspec.param_count = 1;
parent_fwspec.param[0] = hwirq;
err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec); err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
if (err) if (err)
...@@ -170,7 +219,7 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, ...@@ -170,7 +219,7 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
} }
static const struct irq_domain_ops pch_pic_domain_ops = { static const struct irq_domain_ops pch_pic_domain_ops = {
.translate = pch_pic_domain_translate,
.alloc = pch_pic_alloc, .alloc = pch_pic_alloc,
.free = irq_domain_free_irqs_parent, .free = irq_domain_free_irqs_parent,
}; };
...@@ -180,7 +229,7 @@ static void pch_pic_reset(struct pch_pic *priv) ...@@ -180,7 +229,7 @@ static void pch_pic_reset(struct pch_pic *priv)
int i; int i;
for (i = 0; i < PIC_COUNT; i++) { for (i = 0; i < PIC_COUNT; i++) {
/* Write vector ID */
writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i)); writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
/* Hardcode route to HT0 Lo */ /* Hardcode route to HT0 Lo */
writeb(1, priv->base + PCH_INT_ROUTE(i)); writeb(1, priv->base + PCH_INT_ROUTE(i));
...@@ -198,50 +247,37 @@ static void pch_pic_reset(struct pch_pic *priv) ...@@ -198,50 +247,37 @@ static void pch_pic_reset(struct pch_pic *priv)
} }
} }
static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
struct irq_domain *parent_domain, struct fwnode_handle *domain_handle,
u32 gsi_base)
{
struct pch_pic *priv;

priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;

raw_spin_lock_init(&priv->pic_lock);
priv->base = ioremap(addr, size);
if (!priv->base)
goto free_priv;

priv->ht_vec_base = vec_base;
priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1;
priv->gsi_base = gsi_base;

priv->pic_domain = irq_domain_create_hierarchy(parent_domain, 0,
priv->vec_count, domain_handle,
&pch_pic_domain_ops, priv);
if (!priv->pic_domain) {
pr_err("Failed to create IRQ domain\n");
goto iounmap_base;
}

pch_pic_reset(priv);
pch_pic_handle[nr_pics] = domain_handle;
pch_pic_priv[nr_pics++] = priv;

return 0;
...@@ -250,7 +286,86 @@ static int pch_pic_of_init(struct device_node *node,
free_priv:
kfree(priv);

return -EINVAL;
}
#ifdef CONFIG_OF
static int pch_pic_of_init(struct device_node *node,
struct device_node *parent)
{
int err, vec_base;
struct resource res;
struct irq_domain *parent_domain;
if (of_address_to_resource(node, 0, &res))
return -EINVAL;
parent_domain = irq_find_host(parent);
if (!parent_domain) {
pr_err("Failed to find the parent domain\n");
return -ENXIO;
}
if (of_property_read_u32(node, "loongson,pic-base-vec", &vec_base)) {
pr_err("Failed to determine pic-base-vec\n");
return -EINVAL;
}
err = pch_pic_init(res.start, resource_size(&res), vec_base,
parent_domain, of_node_to_fwnode(node), 0);
if (err < 0)
return err;
return 0;
} }
IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init); IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init);
#endif
#ifdef CONFIG_ACPI
static int __init
pch_lpc_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_lpc_pic *pchlpc_entry = (struct acpi_madt_lpc_pic *)header;
return pch_lpc_acpi_init(pch_pic_priv[0]->pic_domain, pchlpc_entry);
}
static int __init acpi_cascade_irqdomain_init(void)
{
acpi_table_parse_madt(ACPI_MADT_TYPE_LPC_PIC,
pch_lpc_parse_madt, 0);
return 0;
}
int __init pch_pic_acpi_init(struct irq_domain *parent,
struct acpi_madt_bio_pic *acpi_pchpic)
{
int ret, vec_base;
struct fwnode_handle *domain_handle;
vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchpic);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
return -ENOMEM;
}
ret = pch_pic_init(acpi_pchpic->address, acpi_pchpic->size,
vec_base, parent, domain_handle, acpi_pchpic->gsi_base);
if (ret < 0) {
irq_domain_free_fwnode(domain_handle);
return ret;
}
if (acpi_pchpic->id == 0)
acpi_cascade_irqdomain_init();
return ret;
}
#endif
...@@ -293,7 +293,7 @@ config PCI_HYPERV_INTERFACE ...@@ -293,7 +293,7 @@ config PCI_HYPERV_INTERFACE
config PCI_LOONGSON config PCI_LOONGSON
bool "LOONGSON PCI Controller" bool "LOONGSON PCI Controller"
depends on MACH_LOONGSON64 || COMPILE_TEST depends on MACH_LOONGSON64 || COMPILE_TEST
depends on OF || ACPI
depends on PCI_QUIRKS depends on PCI_QUIRKS
default MACH_LOONGSON64 default MACH_LOONGSON64
help help
......
...@@ -9,6 +9,8 @@ ...@@ -9,6 +9,8 @@
#include <linux/of_pci.h> #include <linux/of_pci.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/pci_ids.h> #include <linux/pci_ids.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include "../pci.h" #include "../pci.h"
...@@ -18,18 +20,31 @@ ...@@ -18,18 +20,31 @@
#define DEV_PCIE_PORT_2 0x7a29 #define DEV_PCIE_PORT_2 0x7a29
#define DEV_LS2K_APB 0x7a02 #define DEV_LS2K_APB 0x7a02
#define DEV_LS7A_GMAC 0x7a03
#define DEV_LS7A_DC1 0x7a06
#define DEV_LS7A_LPC 0x7a0c #define DEV_LS7A_LPC 0x7a0c
#define DEV_LS7A_AHCI 0x7a08
#define DEV_LS7A_CONF 0x7a10
#define DEV_LS7A_GNET 0x7a13
#define DEV_LS7A_EHCI 0x7a14
#define DEV_LS7A_DC2 0x7a36
#define DEV_LS7A_HDMI 0x7a37
#define FLAG_CFG0 BIT(0) #define FLAG_CFG0 BIT(0)
#define FLAG_CFG1 BIT(1) #define FLAG_CFG1 BIT(1)
#define FLAG_DEV_FIX BIT(2) #define FLAG_DEV_FIX BIT(2)
#define FLAG_DEV_HIDDEN BIT(3)
struct loongson_pci_data {
u32 flags;
struct pci_ops *ops;
};
struct loongson_pci { struct loongson_pci {
void __iomem *cfg0_base; void __iomem *cfg0_base;
void __iomem *cfg1_base; void __iomem *cfg1_base;
struct platform_device *pdev; struct platform_device *pdev;
const struct loongson_pci_data *data;
}; };
/* Fixup wrong class code in PCIe bridges */ /* Fixup wrong class code in PCIe bridges */
...@@ -92,55 +107,106 @@ static void loongson_mrrs_quirk(struct pci_dev *dev) ...@@ -92,55 +107,106 @@ static void loongson_mrrs_quirk(struct pci_dev *dev)
} }
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk); DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk);
static void loongson_pci_pin_quirk(struct pci_dev *pdev)
{
pdev->pin = 1 + (PCI_FUNC(pdev->devfn) & 3);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_DC1, loongson_pci_pin_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_DC2, loongson_pci_pin_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_GMAC, loongson_pci_pin_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_AHCI, loongson_pci_pin_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_EHCI, loongson_pci_pin_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_GNET, loongson_pci_pin_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_HDMI, loongson_pci_pin_quirk);

static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
{
struct pci_config_window *cfg;

if (acpi_disabled)
return (struct loongson_pci *)(bus->sysdata);

cfg = bus->sysdata;
return (struct loongson_pci *)(cfg->priv);
}
static void __iomem *cfg0_map(struct loongson_pci *priv, struct pci_bus *bus,
unsigned int devfn, int where)
{
unsigned long addroff = 0x0;
unsigned char busnum = bus->number;

if (!pci_is_root_bus(bus)) {
addroff |= BIT(24); /* Type 1 Access */
addroff |= (busnum << 16);
}
addroff |= (devfn << 8) | where;
return priv->cfg0_base + addroff;
}
static void __iomem *cfg1_map(struct loongson_pci *priv, struct pci_bus *bus,
unsigned int devfn, int where)
{
unsigned long addroff = 0x0;
unsigned char busnum = bus->number;

if (!pci_is_root_bus(bus)) {
addroff |= BIT(28); /* Type 1 Access */
addroff |= (busnum << 16);
}
addroff |= (devfn << 8) | (where & 0xff) | ((where & 0xf00) << 16);
return priv->cfg1_base + addroff;
}
static bool pdev_may_exist(struct pci_bus *bus, unsigned int device,
unsigned int function)
{
return !(pci_is_root_bus(bus) &&
(device >= 9 && device <= 20) && (function > 0));
}
static void __iomem *pci_loongson_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
unsigned int device = PCI_SLOT(devfn);
unsigned int function = PCI_FUNC(devfn);
struct loongson_pci *priv = pci_bus_to_loongson_pci(bus);
/*
* Do not read more than one device on the bus other than
* the host bus.
*/
if ((priv->data->flags & FLAG_DEV_FIX) && bus->self) {
if (!pci_is_root_bus(bus) && (device > 0))
return NULL;
}
/* Don't access non-existent devices */
if (priv->data->flags & FLAG_DEV_HIDDEN) {
if (!pdev_may_exist(bus, device, function))
return NULL;
}
/* CFG0 can only access standard space */ /* CFG0 can only access standard space */
if (where < PCI_CFG_SPACE_SIZE && priv->cfg0_base) if (where < PCI_CFG_SPACE_SIZE && priv->cfg0_base)
return cfg0_map(priv, bus, devfn, where);
/* CFG1 can access extended space */ /* CFG1 can access extended space */
if (where < PCI_CFG_SPACE_EXP_SIZE && priv->cfg1_base) if (where < PCI_CFG_SPACE_EXP_SIZE && priv->cfg1_base)
return cfg1_map(priv, bus, devfn, where);
return NULL; return NULL;
} }
#ifdef CONFIG_OF
static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{ {
int irq; int irq;
...@@ -159,20 +225,42 @@ static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) ...@@ -159,20 +225,42 @@ static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return val; return val;
} }
/* LS2K/LS7A accept 8/16/32-bit PCI config operations */
static struct pci_ops loongson_pci_ops = { static struct pci_ops loongson_pci_ops = {
.map_bus = pci_loongson_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
};
/* RS780/SR5690 only accept 32-bit PCI config operations */
static struct pci_ops loongson_pci_ops32 = {
.map_bus = pci_loongson_map_bus, .map_bus = pci_loongson_map_bus,
.read = pci_generic_config_read32, .read = pci_generic_config_read32,
.write = pci_generic_config_write32, .write = pci_generic_config_write32,
}; };
static const struct loongson_pci_data ls2k_pci_data = {
.flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN,
.ops = &loongson_pci_ops,
};
static const struct loongson_pci_data ls7a_pci_data = {
.flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN,
.ops = &loongson_pci_ops,
};
static const struct loongson_pci_data rs780e_pci_data = {
.flags = FLAG_CFG0,
.ops = &loongson_pci_ops32,
};
static const struct of_device_id loongson_pci_of_match[] = {
{ .compatible = "loongson,ls2k-pci",
.data = &ls2k_pci_data, },
{ .compatible = "loongson,ls7a-pci",
.data = &ls7a_pci_data, },
{ .compatible = "loongson,rs780e-pci",
.data = &rs780e_pci_data, },
{}
};
}; };
...@@ -193,20 +281,20 @@ static int loongson_pci_probe(struct platform_device *pdev) ...@@ -193,20 +281,20 @@ static int loongson_pci_probe(struct platform_device *pdev)
priv = pci_host_bridge_priv(bridge); priv = pci_host_bridge_priv(bridge);
priv->pdev = pdev; priv->pdev = pdev;
priv->data = of_device_get_match_data(dev);

if (priv->data->flags & FLAG_CFG0) {
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
dev_err(dev, "missing mem resources for cfg0\n");
else {
priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs);
if (IS_ERR(priv->cfg0_base))
return PTR_ERR(priv->cfg0_base);
}
}

if (priv->data->flags & FLAG_CFG1) {
regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!regs) if (!regs)
dev_info(dev, "missing mem resource for cfg1\n"); dev_info(dev, "missing mem resource for cfg1\n");
...@@ -218,7 +306,7 @@ static int loongson_pci_probe(struct platform_device *pdev) ...@@ -218,7 +306,7 @@ static int loongson_pci_probe(struct platform_device *pdev)
} }
bridge->sysdata = priv; bridge->sysdata = priv;
bridge->ops = priv->data->ops;
bridge->map_irq = loongson_map_irq; bridge->map_irq = loongson_map_irq;
return pci_host_probe(bridge); return pci_host_probe(bridge);
...@@ -232,3 +320,41 @@ static struct platform_driver loongson_pci_driver = { ...@@ -232,3 +320,41 @@ static struct platform_driver loongson_pci_driver = {
.probe = loongson_pci_probe, .probe = loongson_pci_probe,
}; };
builtin_platform_driver(loongson_pci_driver); builtin_platform_driver(loongson_pci_driver);
#endif
#ifdef CONFIG_ACPI
static int loongson_pci_ecam_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
struct loongson_pci *priv;
struct loongson_pci_data *data;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
cfg->priv = priv;
data->flags = FLAG_CFG1 | FLAG_DEV_HIDDEN;
priv->data = data;
priv->cfg1_base = cfg->win - (cfg->busr.start << 16);
return 0;
}
const struct pci_ecam_ops loongson_pci_ecam_ops = {
.bus_shift = 16,
.init = loongson_pci_ecam_init,
.pci_ops = {
.map_bus = pci_loongson_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
}
};
#endif
...@@ -41,8 +41,10 @@ const char *pci_power_names[] = { ...@@ -41,8 +41,10 @@ const char *pci_power_names[] = {
}; };
EXPORT_SYMBOL_GPL(pci_power_names); EXPORT_SYMBOL_GPL(pci_power_names);
#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy; int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy); EXPORT_SYMBOL(isa_dma_bridge_buggy);
#endif
int pci_pci_problems; int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems); EXPORT_SYMBOL(pci_pci_problems);
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/isa-dma.h> /* isa_dma_bridge_buggy */
#include <linux/init.h> #include <linux/init.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/acpi.h> #include <linux/acpi.h>
...@@ -30,7 +31,6 @@ ...@@ -30,7 +31,6 @@
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <linux/switchtec.h> #include <linux/switchtec.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h" #include "pci.h"
static ktime_t fixup_debug_start(struct pci_dev *dev, static ktime_t fixup_debug_start(struct pci_dev *dev,
...@@ -239,6 +239,7 @@ static void quirk_passive_release(struct pci_dev *dev) ...@@ -239,6 +239,7 @@ static void quirk_passive_release(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
#ifdef CONFIG_X86_32
/* /*
* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a * The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a
* workaround but VIA don't answer queries. If you happen to have good * workaround but VIA don't answer queries. If you happen to have good
...@@ -265,6 +266,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma ...@@ -265,6 +266,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
#endif
/* /*
* Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/libata.h>
#include <linux/ioport.h> #include <linux/ioport.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -322,8 +323,8 @@ static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci, ...@@ -322,8 +323,8 @@ static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,
* treat the compatibility IRQs as busy. * treat the compatibility IRQs as busy.
*/ */
if ((progif & 0x5) != 0x5) if ((progif & 0x5) != 0x5)
if (ATA_PRIMARY_IRQ(pci) == irq ||
ATA_SECONDARY_IRQ(pci) == irq) {
pnp_dbg(&pnp->dev, " legacy IDE device %s " pnp_dbg(&pnp->dev, " legacy IDE device %s "
"using irq %d\n", pci_name(pci), irq); "using irq %d\n", pci_name(pci), irq);
return 1; return 1;
......
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_GENERIC_PCI_H
#define __ASM_GENERIC_PCI_H

#ifndef PCIBIOS_MIN_IO
#define PCIBIOS_MIN_IO 0
#endif

#ifndef PCIBIOS_MIN_MEM
#define PCIBIOS_MIN_MEM 0
#endif

#ifndef pcibios_assign_all_busses
/* For bootloaders that do not initialize the PCI bus */
#define pcibios_assign_all_busses() 1
#endif

/* Enable generic resource mapping code in drivers/pci/ */
#define ARCH_GENERIC_PCI_MMAP_RESOURCE

#ifdef CONFIG_PCI_DOMAINS
static inline int pci_proc_domain(struct pci_bus *bus)
{
/* always show the domain in /proc */
return 1;
}
#endif /* CONFIG_PCI_DOMAINS */

#endif /* __ASM_GENERIC_PCI_H */
...@@ -25,6 +25,8 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *); ...@@ -25,6 +25,8 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP #ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port, extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
unsigned int nr); unsigned int nr);
#elif !defined(CONFIG_HAS_IOPORT_MAP)
#define __pci_ioport_map(dev, port, nr) NULL
#else #else
#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr)) #define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
#endif #endif
......
...@@ -105,6 +105,7 @@ enum acpi_irq_model_id { ...@@ -105,6 +105,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOSAPIC, ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM, ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC, ACPI_IRQ_MODEL_GIC,
ACPI_IRQ_MODEL_LPIC,
ACPI_IRQ_MODEL_COUNT ACPI_IRQ_MODEL_COUNT
}; };
...@@ -356,7 +357,8 @@ int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); ...@@ -356,7 +357,8 @@ int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
void acpi_set_irq_model(enum acpi_irq_model_id model, void acpi_set_irq_model(enum acpi_irq_model_id model,
struct fwnode_handle *(*)(u32));
void acpi_set_gsi_to_irq_fallback(u32 (*)(u32));
struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
unsigned int size, unsigned int size,
......
...@@ -150,6 +150,7 @@ enum cpuhp_state { ...@@ -150,6 +150,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING,
CPUHP_AP_IRQ_RISCV_STARTING, CPUHP_AP_IRQ_RISCV_STARTING,
CPUHP_AP_IRQ_LOONGARCH_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_MICROCODE_LOADER, CPUHP_AP_MICROCODE_LOADER,
......
...@@ -1121,6 +1121,7 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on); ...@@ -1121,6 +1121,7 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on);
/* Setup functions for irq_chip_generic */ /* Setup functions for irq_chip_generic */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw_irq); irq_hw_number_t hw_irq);
void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq);
struct irq_chip_generic * struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
void __iomem *reg_base, irq_flow_handler_t handler); void __iomem *reg_base, irq_flow_handler_t handler);
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ISA_DMA_H
#define __LINUX_ISA_DMA_H
#include <asm/dma.h>
#if defined(CONFIG_PCI) && defined(CONFIG_X86_32)
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* __LINUX_ISA_DMA_H */
...@@ -87,6 +87,7 @@ extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 * ...@@ -87,6 +87,7 @@ extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 *
extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */
extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */
#endif #endif
#if IS_ENABLED(CONFIG_PCI_HOST_COMMON) #if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
......
...@@ -431,7 +431,7 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, ...@@ -431,7 +431,7 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
return 0; return 0;
} }
void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
{ {
struct irq_data *data = irq_domain_get_irq_data(d, virq); struct irq_data *data = irq_domain_get_irq_data(d, virq);
struct irq_domain_chip_generic *dgc = d->gc; struct irq_domain_chip_generic *dgc = d->gc;
......
...@@ -12,8 +12,8 @@ ...@@ -12,8 +12,8 @@
#undef HAVE_REALLY_SLOW_DMA_CONTROLLER #undef HAVE_REALLY_SLOW_DMA_CONTROLLER
#include <linux/export.h> #include <linux/export.h>
#include <linux/isa-dma.h>
#include <sound/core.h> #include <sound/core.h>
#include <asm/dma.h>
/** /**
* snd_dma_program - program an ISA DMA transfer * snd_dma_program - program an ISA DMA transfer
......