Commit 2ae21010 authored by Suresh Siddha, committed by Ingo Molnar

x64, x2apic/intr-remap: Interrupt remapping infrastructure

Interrupt remapping (part of Intel Virtualization Technology for Directed I/O)
infrastructure.
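For reference, a minimal sketch of how the 128-bit remap entry described by the
new 'struct irte' bit-fields (added in the __DMAR_H__ hunk below) could be
composed. This snippet is not part of the patch and all field values are
arbitrary example values:

    /*
     * Illustrative only -- not part of this patch.  Fills an interrupt
     * remap table entry using the bit-fields of the new struct irte;
     * the values chosen here are examples, not kernel policy.
     */
    static void example_fill_irte(struct irte *irte)
    {
            memset(irte, 0, sizeof(*irte));

            irte->present      = 1;      /* entry is valid */
            irte->dst_mode     = 0;      /* physical destination mode */
            irte->trigger_mode = 0;      /* edge triggered */
            irte->dlvry_mode   = 0;      /* fixed delivery */
            irte->vector       = 0x31;   /* example vector */
            irte->dest_id      = 1;      /* example destination APIC id */
            irte->sid          = 0x0100; /* example source (requester) id */
            irte->sq           = 0;
            irte->svt          = 1;      /* verify source-id against sid */
    }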
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: akpm@linux-foundation.org
Cc: arjan@linux.intel.com
Cc: andi@firstfloor.org
Cc: ebiederm@xmission.com
Cc: jbarnes@virtuousgeek.org
Cc: steiner@sgi.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent fe962e90
@@ -145,6 +145,8 @@ struct device_domain_info {
 extern int init_dmars(void);
 extern void free_dmar_iommu(struct intel_iommu *iommu);
+extern int dmar_disabled;
 #ifndef CONFIG_DMAR_GFX_WA
 static inline void iommu_prepare_gfx_mapping(void)
 {
...
@@ -449,6 +449,22 @@ int __init early_dmar_detect(void)
         return (ACPI_SUCCESS(status) ? 1 : 0);
 }
+void __init detect_intel_iommu(void)
+{
+        int ret;
+        ret = early_dmar_detect();
+#ifdef CONFIG_DMAR
+        {
+                if (ret && !no_iommu && !iommu_detected && !swiotlb &&
+                    !dmar_disabled)
+                        iommu_detected = 1;
+        }
+#endif
+}
 int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
         struct intel_iommu *iommu;
...
@@ -76,7 +76,7 @@ static long list_size;
 static void domain_remove_dev_info(struct dmar_domain *domain);
-static int dmar_disabled;
+int dmar_disabled;
 static int __initdata dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
@@ -2238,15 +2238,6 @@ static void __init iommu_exit_mempool(void)
 }
-void __init detect_intel_iommu(void)
-{
-        if (swiotlb || no_iommu || iommu_detected || dmar_disabled)
-                return;
-        if (early_dmar_detect()) {
-                iommu_detected = 1;
-        }
-}
 static void __init init_no_remapping_devices(void)
 {
         struct dmar_drhd_unit *drhd;
@@ -2293,15 +2284,19 @@ int __init intel_iommu_init(void)
 {
         int ret = 0;
-        if (no_iommu || swiotlb || dmar_disabled)
-                return -ENODEV;
         if (dmar_table_init())
                 return -ENODEV;
         if (dmar_dev_scope_init())
                 return -ENODEV;
+        /*
+         * Check the need for DMA-remapping initialization now.
+         * Above initialization will also be used by Interrupt-remapping.
+         */
+        if (no_iommu || swiotlb || dmar_disabled)
+                return -ENODEV;
         iommu_init_mempool();
         dmar_init_reserved_ranges();
...
@@ -56,6 +56,7 @@
 #define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
 #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
 #define DMAR_ICS_REG 0x98 /* Invalidation complete status register */
+#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
 #define OFFSET_STRIDE (9)
 /*
@@ -157,16 +158,20 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define DMA_GCMD_SRTP (((u32)1) << 30)
 #define DMA_GCMD_SFL (((u32)1) << 29)
 #define DMA_GCMD_EAFL (((u32)1) << 28)
-#define DMA_GCMD_QIE (((u32)1) << 26)
 #define DMA_GCMD_WBF (((u32)1) << 27)
+#define DMA_GCMD_QIE (((u32)1) << 26)
+#define DMA_GCMD_SIRTP (((u32)1) << 24)
+#define DMA_GCMD_IRE (((u32) 1) << 25)
 /* GSTS_REG */
 #define DMA_GSTS_TES (((u32)1) << 31)
 #define DMA_GSTS_RTPS (((u32)1) << 30)
 #define DMA_GSTS_FLS (((u32)1) << 29)
 #define DMA_GSTS_AFLS (((u32)1) << 28)
-#define DMA_GSTS_QIES (((u32)1) << 26)
 #define DMA_GSTS_WBFS (((u32)1) << 27)
+#define DMA_GSTS_QIES (((u32)1) << 26)
+#define DMA_GSTS_IRTPS (((u32)1) << 24)
+#define DMA_GSTS_IRES (((u32)1) << 25)
 /* CCMD_REG */
 #define DMA_CCMD_ICC (((u64)1) << 63)
@@ -245,6 +250,16 @@ struct q_inval {
         int free_cnt;
 };
+#ifdef CONFIG_INTR_REMAP
+/* 1MB - maximum possible interrupt remapping table size */
+#define INTR_REMAP_PAGE_ORDER 8
+#define INTR_REMAP_TABLE_REG_SIZE 0xf
+struct ir_table {
+        struct irte *base;
+};
+#endif
 struct intel_iommu {
         void __iomem *reg; /* Pointer to hardware regs, virtual addr */
         u64 cap;
@@ -266,6 +281,9 @@ struct intel_iommu {
         struct sys_device sysdev;
 #endif
         struct q_inval *qi; /* Queued invalidation info */
+#ifdef CONFIG_INTR_REMAP
+        struct ir_table *ir_table; /* Interrupt remapping info */
+#endif
 };
 static inline void __iommu_flush_cache(
@@ -279,5 +297,7 @@ extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 extern int alloc_iommu(struct dmar_drhd_unit *drhd);
 extern void free_iommu(struct intel_iommu *iommu);
+extern int dmar_enable_qi(struct intel_iommu *iommu);
+extern void qi_global_iec(struct intel_iommu *iommu);
 #endif
 #include <linux/dmar.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/pci.h>
 #include <asm/io_apic.h>
 #include "intel-iommu.h"
 #include "intr_remapping.h"
 static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static int ir_ioapic_num;
+int intr_remapping_enabled;
+static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
+{
+        u64 addr;
+        u32 cmd, sts;
+        unsigned long flags;
+        addr = virt_to_phys((void *)iommu->ir_table->base);
+        spin_lock_irqsave(&iommu->register_lock, flags);
+        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
+                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
+        /* Set interrupt-remapping table pointer */
+        cmd = iommu->gcmd | DMA_GCMD_SIRTP;
+        writel(cmd, iommu->reg + DMAR_GCMD_REG);
+        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+                      readl, (sts & DMA_GSTS_IRTPS), sts);
+        spin_unlock_irqrestore(&iommu->register_lock, flags);
+        /*
+         * global invalidation of interrupt entry cache before enabling
+         * interrupt-remapping.
+         */
+        qi_global_iec(iommu);
+        spin_lock_irqsave(&iommu->register_lock, flags);
+        /* Enable interrupt-remapping */
+        cmd = iommu->gcmd | DMA_GCMD_IRE;
+        iommu->gcmd |= DMA_GCMD_IRE;
+        writel(cmd, iommu->reg + DMAR_GCMD_REG);
+        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+                      readl, (sts & DMA_GSTS_IRES), sts);
+        spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
+{
+        struct ir_table *ir_table;
+        struct page *pages;
+        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
+                                             GFP_KERNEL);
+        if (!iommu->ir_table)
+                return -ENOMEM;
+        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
+        if (!pages) {
+                printk(KERN_ERR "failed to allocate pages of order %d\n",
+                       INTR_REMAP_PAGE_ORDER);
+                kfree(iommu->ir_table);
+                return -ENOMEM;
+        }
+        ir_table->base = page_address(pages);
+        iommu_set_intr_remapping(iommu, mode);
+        return 0;
+}
+int __init enable_intr_remapping(int eim)
+{
+        struct dmar_drhd_unit *drhd;
+        int setup = 0;
+        /*
+         * check for the Interrupt-remapping support
+         */
+        for_each_drhd_unit(drhd) {
+                struct intel_iommu *iommu = drhd->iommu;
+                if (!ecap_ir_support(iommu->ecap))
+                        continue;
+                if (eim && !ecap_eim_support(iommu->ecap)) {
+                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
+                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
+                        return -1;
+                }
+        }
+        /*
+         * Enable queued invalidation for all the DRHD's.
+         */
+        for_each_drhd_unit(drhd) {
+                int ret;
+                struct intel_iommu *iommu = drhd->iommu;
+                ret = dmar_enable_qi(iommu);
+                if (ret) {
+                        printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
+                               " invalidation, ecap %Lx, ret %d\n",
+                               drhd->reg_base_addr, iommu->ecap, ret);
+                        return -1;
+                }
+        }
+        /*
+         * Setup Interrupt-remapping for all the DRHD's now.
+         */
+        for_each_drhd_unit(drhd) {
+                struct intel_iommu *iommu = drhd->iommu;
+                if (!ecap_ir_support(iommu->ecap))
+                        continue;
+                if (setup_intr_remapping(iommu, eim))
+                        goto error;
+                setup = 1;
+        }
+        if (!setup)
+                goto error;
+        intr_remapping_enabled = 1;
+        return 0;
+error:
+        /*
+         * handle error condition gracefully here!
+         */
+        return -1;
+}
 static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
                                  struct intel_iommu *iommu)
...
@@ -4,3 +4,5 @@ struct ioapic_scope {
         struct intel_iommu *iommu;
         unsigned int id;
 };
+#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
@@ -25,9 +25,85 @@
 #include <linux/types.h>
 #include <linux/msi.h>
-#ifdef CONFIG_DMAR
+#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
 struct intel_iommu;
+struct dmar_drhd_unit {
+        struct list_head list;          /* list of drhd units */
+        struct acpi_dmar_header *hdr;   /* ACPI header */
+        u64 reg_base_addr;              /* register base address*/
+        struct pci_dev **devices;       /* target device array */
+        int devices_cnt;                /* target device count */
+        u8 ignored:1;                   /* ignore drhd */
+        u8 include_all:1;
+        struct intel_iommu *iommu;
+};
+extern struct list_head dmar_drhd_units;
+#define for_each_drhd_unit(drhd) \
+        list_for_each_entry(drhd, &dmar_drhd_units, list)
+extern int dmar_table_init(void);
+extern int early_dmar_detect(void);
+extern int dmar_dev_scope_init(void);
+/* Intel IOMMU detection */
+extern void detect_intel_iommu(void);
+extern int parse_ioapics_under_ir(void);
+extern int alloc_iommu(struct dmar_drhd_unit *);
+#else
+static inline void detect_intel_iommu(void)
+{
+        return;
+}
+static inline int dmar_table_init(void)
+{
+        return -ENODEV;
+}
+#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */
+#ifdef CONFIG_INTR_REMAP
+extern int intr_remapping_enabled;
+extern int enable_intr_remapping(int);
+struct irte {
+        union {
+                struct {
+                        __u64   present         : 1,
+                                fpd             : 1,
+                                dst_mode        : 1,
+                                redir_hint      : 1,
+                                trigger_mode    : 1,
+                                dlvry_mode      : 3,
+                                avail           : 4,
+                                __reserved_1    : 4,
+                                vector          : 8,
+                                __reserved_2    : 8,
+                                dest_id         : 32;
+                };
+                __u64 low;
+        };
+        union {
+                struct {
+                        __u64   sid             : 16,
+                                sq              : 2,
+                                svt             : 2,
+                                __reserved_3    : 44;
+                };
+                __u64 high;
+        };
+};
+#else
+#define enable_intr_remapping(mode) (-1)
+#define intr_remapping_enabled (0)
+#endif
+#ifdef CONFIG_DMAR
 extern const char *dmar_get_fault_reason(u8 fault_reason);
 /* Can't use the common MSI interrupt functions
@@ -40,29 +116,8 @@ extern void dmar_msi_write(int irq, struct msi_msg *msg);
 extern int dmar_set_interrupt(struct intel_iommu *iommu);
 extern int arch_setup_dmar_msi(unsigned int irq);
-/* Intel IOMMU detection and initialization functions */
-extern void detect_intel_iommu(void);
-extern int intel_iommu_init(void);
-extern int dmar_table_init(void);
-extern int early_dmar_detect(void);
-extern int dmar_dev_scope_init(void);
-extern int parse_ioapics_under_ir(void);
-extern struct list_head dmar_drhd_units;
+extern int iommu_detected, no_iommu;
 extern struct list_head dmar_rmrr_units;
-struct dmar_drhd_unit {
-        struct list_head list;          /* list of drhd units */
-        struct acpi_dmar_header *hdr;   /* ACPI header */
-        u64 reg_base_addr;              /* register base address*/
-        struct pci_dev **devices;       /* target device array */
-        int devices_cnt;                /* target device count */
-        u8 ignored:1;                   /* ignore drhd */
-        u8 include_all:1;
-        struct intel_iommu *iommu;
-};
 struct dmar_rmrr_unit {
         struct list_head list;          /* list of rmrr units */
         struct acpi_dmar_header *hdr;   /* ACPI header */
@@ -72,24 +127,19 @@ struct dmar_rmrr_unit {
         int devices_cnt;                /* target device count */
 };
-#define for_each_drhd_unit(drhd) \
-        list_for_each_entry(drhd, &dmar_drhd_units, list)
 #define for_each_rmrr_units(rmrr) \
         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
-extern int alloc_iommu(struct dmar_drhd_unit *);
+/* Intel DMAR initialization functions */
+extern int intel_iommu_init(void);
+extern int dmar_disabled;
 #else
-static inline void detect_intel_iommu(void)
-{
-        return;
-}
 static inline int intel_iommu_init(void)
 {
+#ifdef CONFIG_INTR_REMAP
+        return dmar_dev_scope_init();
+#else
         return -ENODEV;
-}
-static inline int dmar_table_init(void)
-{
-        return -ENODEV;
+#endif
 }
 #endif /* !CONFIG_DMAR */
 #endif /* __DMAR_H__ */