Commit 269b0123 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu: (89 commits)
  AMD IOMMU: remove now unnecessary #ifdefs
  AMD IOMMU: prealloc_protection_domains should be static
  kvm/iommu: fix compile warning
  AMD IOMMU: add statistics about total number of map requests
  AMD IOMMU: add statistics about allocated io memory
  AMD IOMMU: add stats counter for domain tlb flushes
  AMD IOMMU: add stats counter for single iommu domain tlb flushes
  AMD IOMMU: add stats counter for cross-page request
  AMD IOMMU: add stats counter for free_coherent requests
  AMD IOMMU: add stats counter for alloc_coherent requests
  AMD IOMMU: add stats counter for unmap_sg requests
  AMD IOMMU: add stats counter for map_sg requests
  AMD IOMMU: add stats counter for unmap_single requests
  AMD IOMMU: add stats counter for map_single requests
  AMD IOMMU: add stats counter for completion wait events
  AMD IOMMU: add init code for statistic collection
  AMD IOMMU: add necessary header defines for stats counting
  AMD IOMMU: add Kconfig entry for statistic collection code
  AMD IOMMU: use dev_name in iommu_enable function
  AMD IOMMU: use calc_devid in prealloc_protection_domains
  ...
parents f60a0a79 065a6d68
@@ -687,3 +687,6 @@ config IRQ_PER_CPU
 config IOMMU_HELPER
 	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
+
+config IOMMU_API
+	def_bool (DMAR)
@@ -467,7 +467,7 @@ struct kvm_arch {
 	struct kvm_sal_data rdv_sal_data;
 	struct list_head assigned_dev_head;
-	struct dmar_domain *intel_iommu_domain;
+	struct iommu_domain *iommu_domain;
 	struct hlist_head irq_ack_notifier_list;
 	unsigned long irq_sources_bitmap;
@@ -51,8 +51,8 @@ EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
 		coalesced_mmio.o irq_comm.o)
-ifeq ($(CONFIG_DMAR),y)
-common-objs += $(addprefix ../../../virt/kvm/, vtd.o)
+ifeq ($(CONFIG_IOMMU_API),y)
+common-objs += $(addprefix ../../../virt/kvm/, iommu.o)
 endif
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
@@ -31,6 +31,7 @@
 #include <linux/bitops.h>
 #include <linux/hrtimer.h>
 #include <linux/uaccess.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <asm/pgtable.h>
@@ -188,7 +189,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
 	case KVM_CAP_IOMMU:
-		r = intel_iommu_found();
+		r = iommu_found();
 		break;
 	default:
 		r = 0;
@@ -586,6 +586,16 @@ config AMD_IOMMU
 	  your BIOS for an option to enable it or if you have an IVRS ACPI
 	  table.
+config AMD_IOMMU_STATS
+	bool "Export AMD IOMMU statistics to debugfs"
+	depends on AMD_IOMMU
+	select DEBUG_FS
+	help
+	  This option enables code in the AMD IOMMU driver to collect various
+	  statistics about what's happening in the driver and exports that
+	  information to userspace via debugfs.
+
+	  If unsure, say N.
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
 	def_bool y if X86_64
@@ -599,6 +609,9 @@ config SWIOTLB
 config IOMMU_HELPER
 	def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
+
+config IOMMU_API
+	def_bool (AMD_IOMMU || DMAR)
+
 config MAXSMP
 	bool "Configure Maximum number of SMP Processors and NUMA Nodes"
 	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
@@ -190,6 +190,11 @@
 /* FIXME: move this macro to <linux/pci.h> */
 #define PCI_BUS(x) (((x) >> 8) & 0xff)
+
+/* Protection domain flags */
+#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
+#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
+					      domain for an IOMMU */
 /*
  * This structure contains generic data for IOMMU protection domains
  * independent of their use.
@@ -199,6 +204,8 @@ struct protection_domain {
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
+	unsigned long flags;	/* flags to find out type of domain */
+	unsigned dev_cnt;	/* devices assigned to this domain */
 	void *priv;		/* private data */
 };
@@ -295,7 +302,7 @@ struct amd_iommu {
 	bool int_enabled;
 	/* if one, we need to send a completion wait command */
-	int need_sync;
+	bool need_sync;
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
@@ -374,7 +381,7 @@ extern struct protection_domain **amd_iommu_pd_table;
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 /* will be 1 if device isolation is enabled */
-extern int amd_iommu_isolate;
+extern bool amd_iommu_isolate;
 /*
  * If true, the addresses will be flushed on unmap time, not when
@@ -382,18 +389,6 @@ extern int amd_iommu_isolate;
  */
 extern bool amd_iommu_unmap_flush;
-/* takes a PCI device id and prints it out in a readable form */
-static inline void print_devid(u16 devid, int nl)
-{
-	int bus = devid >> 8;
-	int dev = devid >> 3 & 0x1f;
-	int fn  = devid & 0x07;
-	printk("%02x:%02x.%x", bus, dev, fn);
-	if (nl)
-		printk("\n");
-}
 /* takes bus and device/function and returns the device id
  * FIXME: should that be in generic PCI code? */
 static inline u16 calc_devid(u8 bus, u8 devfn)
@@ -401,4 +396,32 @@ static inline u16 calc_devid(u8 bus, u8 devfn)
 	return (((u16)bus) << 8) | devfn;
 }
+
+#ifdef CONFIG_AMD_IOMMU_STATS
+
+struct __iommu_counter {
+	char *name;
+	struct dentry *dent;
+	u64 value;
+};
+
+#define DECLARE_STATS_COUNTER(nm) \
+	static struct __iommu_counter nm = { \
+		.name = #nm, \
+	}
+
+#define INC_STATS_COUNTER(name)		name.value += 1
+#define ADD_STATS_COUNTER(name, x)	name.value += (x)
+#define SUB_STATS_COUNTER(name, x)	name.value -= (x)
+
+#else /* CONFIG_AMD_IOMMU_STATS */
+
+#define DECLARE_STATS_COUNTER(name)
+#define INC_STATS_COUNTER(name)
+#define ADD_STATS_COUNTER(name, x)
+#define SUB_STATS_COUNTER(name, x)
+static inline void amd_iommu_stats_init(void) { }
+
+#endif /* CONFIG_AMD_IOMMU_STATS */
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
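These macros make the instrumentation free when the option is off: DECLARE_STATS_COUNTER() only expands to a file-scope object under CONFIG_AMD_IOMMU_STATS, and the INC/ADD/SUB forms compile to nothing otherwise, so call sites stay unconditional. A minimal sketch of a call site, assuming the header above; the counter name cnt_map_single and the surrounding function are illustrative, not taken from the driver:

DECLARE_STATS_COUNTER(cnt_map_single);

static dma_addr_t example_map_single(phys_addr_t paddr, size_t size)
{
	/* compiles to nothing when CONFIG_AMD_IOMMU_STATS is not set */
	INC_STATS_COUNTER(cnt_map_single);

	/* ...the actual dma_ops mapping work would go here... */
	return 0;
}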
@@ -360,7 +360,7 @@ struct kvm_arch{
 	struct list_head active_mmu_pages;
 	struct list_head assigned_dev_head;
 	struct list_head oos_global_pages;
-	struct dmar_domain *intel_iommu_domain;
+	struct iommu_domain *iommu_domain;
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	struct kvm_pit *vpit;
[diff collapsed: the full file diff is not shown in this view]
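The collapsed diff above is where the declared counters get wired up; the merged commit list mentions "add init code for statistic collection". A hedged sketch of what such an init path plausibly looks like, using the standard debugfs helpers of this era — the function names and the "amd-iommu" directory name here are assumptions for illustration, not taken from the collapsed diff:

#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *stats_dir;

static void __init example_stats_add(struct __iommu_counter *cnt)
{
	/* expose the raw u64 counter value read-only under debugfs */
	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
				       &cnt->value);
}

static void __init example_stats_init(void)
{
	stats_dir = debugfs_create_dir("amd-iommu", NULL);
	if (IS_ERR(stats_dir))
		stats_dir = NULL;
}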
@@ -122,7 +122,8 @@ u16 amd_iommu_last_bdf;			/* largest PCI device id we have
 LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
 					   we find in ACPI */
 unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
-int amd_iommu_isolate = 1;		/* if 1, device isolation is enabled */
+bool amd_iommu_isolate = true;		/* if true, device isolation is
+					   enabled */
 bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */
 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
@@ -245,12 +246,8 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 /* Function to enable the hardware */
 static void __init iommu_enable(struct amd_iommu *iommu)
 {
-	printk(KERN_INFO "AMD IOMMU: Enabling IOMMU "
-	       "at %02x:%02x.%x cap 0x%hx\n",
-	       iommu->dev->bus->number,
-	       PCI_SLOT(iommu->dev->devfn),
-	       PCI_FUNC(iommu->dev->devfn),
-	       iommu->cap_ptr);
+	printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n",
+	       dev_name(&iommu->dev->dev), iommu->cap_ptr);
 	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
 }
@@ -1218,9 +1215,9 @@ static int __init parse_amd_iommu_options(char *str)
 {
 	for (; *str; ++str) {
 		if (strncmp(str, "isolate", 7) == 0)
-			amd_iommu_isolate = 1;
+			amd_iommu_isolate = true;
 		if (strncmp(str, "share", 5) == 0)
-			amd_iommu_isolate = 0;
+			amd_iommu_isolate = false;
 		if (strncmp(str, "fullflush", 9) == 0)
 			amd_iommu_unmap_flush = true;
 	}
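For reference, this parser handles the driver's boot parameter (assuming the usual __setup() wiring for "amd_iommu=", which is not shown in this hunk), so the recognized values after this change are:

amd_iommu=isolate	/* enable device isolation; the default, per the
			   initializer in the previous hunk */
amd_iommu=share		/* put devices into a shared protection domain */
amd_iommu=fullflush	/* flush IO/TLB entries on every unmap */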
@@ -7,8 +7,8 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
 ifeq ($(CONFIG_KVM_TRACE),y)
 common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o)
 endif
-ifeq ($(CONFIG_DMAR),y)
-common-objs += $(addprefix ../../../virt/kvm/, vtd.o)
+ifeq ($(CONFIG_IOMMU_API),y)
+common-objs += $(addprefix ../../../virt/kvm/, iommu.o)
 endif
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/mman.h>
 #include <linux/highmem.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <asm/uaccess.h>
@@ -989,7 +990,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = !tdp_enabled;
 		break;
 	case KVM_CAP_IOMMU:
-		r = intel_iommu_found();
+		r = iommu_found();
 		break;
 	default:
 		r = 0;
@@ -11,6 +11,7 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o
 obj-$(CONFIG_NUMA)	+= node.o
 obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
 obj-$(CONFIG_SMP)	+= topology.o
+obj-$(CONFIG_IOMMU_API) += iommu.o
 ifeq ($(CONFIG_SYSFS),y)
 obj-$(CONFIG_MODULES)	+= module.o
 endif
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/bug.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/iommu.h>

static struct iommu_ops *iommu_ops;

void register_iommu(struct iommu_ops *ops)
{
	if (iommu_ops)
		BUG();

	iommu_ops = ops;
}

bool iommu_found()
{
	return iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_found);

struct iommu_domain *iommu_domain_alloc(void)
{
	struct iommu_domain *domain;
	int ret;

	domain = kmalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	ret = iommu_ops->domain_init(domain);
	if (ret)
		goto out_free;

	return domain;

out_free:
	kfree(domain);

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	iommu_ops->domain_destroy(domain);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	return iommu_ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	iommu_ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
		    phys_addr_t paddr, size_t size, int prot)
{
	return iommu_ops->map(domain, iova, paddr, size, prot);
}
EXPORT_SYMBOL_GPL(iommu_map_range);

void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
		       size_t size)
{
	iommu_ops->unmap(domain, iova, size);
}
EXPORT_SYMBOL_GPL(iommu_unmap_range);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
			       unsigned long iova)
{
	return iommu_ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
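register_iommu() above is how a hardware driver plugs into this layer (the AMD IOMMU driver does so in its collapsed diff). A minimal, hypothetical provider sketch — my_domain_init, my_iommu_ops and my_iommu_setup are made-up names, and a real driver fills the bodies with hardware-specific page-table code:

static int my_domain_init(struct iommu_domain *domain)
{
	/* allocate hardware page tables and stash them in domain->priv */
	domain->priv = NULL;
	return 0;
}

/* ...domain_destroy, attach_dev, detach_dev, map, unmap, iova_to_phys... */

static struct iommu_ops my_iommu_ops = {
	.domain_init	= my_domain_init,
	/* remaining callbacks omitted for brevity; note that all of them
	 * must be set, since the wrappers above call them unconditionally */
};

static int __init my_iommu_setup(void)
{
	register_iommu(&my_iommu_ops);
	return 0;
}

Note the design choice visible in register_iommu(): exactly one IOMMU driver may register per system, and a second registration is treated as a kernel bug rather than an error to recover from.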
@@ -191,26 +191,17 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 {
 	struct acpi_dmar_hardware_unit *drhd;
-	static int include_all;
 	int ret = 0;

 	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

-	if (!dmaru->include_all)
+	if (dmaru->include_all)
+		return 0;
+
 	ret = dmar_parse_dev_scope((void *)(drhd + 1),
 				((void *)drhd) + drhd->header.length,
 				&dmaru->devices_cnt, &dmaru->devices,
 				drhd->segment);
-	else {
-		/* Only allow one INCLUDE_ALL */
-		if (include_all) {
-			printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
-				"device scope is allowed\n");
-			ret = -EINVAL;
-		}
-		include_all = 1;
-	}

 	if (ret) {
 		list_del(&dmaru->list);
 		kfree(dmaru);
@@ -384,12 +375,21 @@ int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
 struct dmar_drhd_unit *
 dmar_find_matched_drhd_unit(struct pci_dev *dev)
 {
-	struct dmar_drhd_unit *drhd = NULL;
+	struct dmar_drhd_unit *dmaru = NULL;
+	struct acpi_dmar_hardware_unit *drhd;

-	list_for_each_entry(drhd, &dmar_drhd_units, list) {
-		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
-						drhd->devices_cnt, dev))
-			return drhd;
+	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
+		drhd = container_of(dmaru->hdr,
+				    struct acpi_dmar_hardware_unit,
+				    header);
+
+		if (dmaru->include_all &&
+		    drhd->segment == pci_domain_nr(dev->bus))
+			return dmaru;
+
+		if (dmar_pci_device_match(dmaru->devices,
+					  dmaru->devices_cnt, dev))
+			return dmaru;
 	}

 	return NULL;
@@ -491,6 +491,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
+	int agaw;

 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -506,6 +507,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

+	agaw = iommu_calculate_agaw(iommu);
+	if (agaw < 0) {
+		printk(KERN_ERR
+		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+		       iommu->seq_id);
+		goto error;
+	}
+	iommu->agaw = agaw;
+
 	/* the registers might be more than one page */
 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
 		cap_max_fault_reg_offset(iommu->cap));
[diff collapsed: the full file diff is not shown in this view]
@@ -9,148 +9,16 @@
 #define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
 #define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

-#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
-#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
-
-/*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
-struct root_entry {
-	u64	val;
-	u64	rsvd1;
-};
-#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
-static inline bool root_present(struct root_entry *root)
-{
-	return (root->val & 1);
-}
-static inline void set_root_present(struct root_entry *root)
-{
-	root->val |= 1;
-}
-static inline void set_root_value(struct root_entry *root, unsigned long value)
-{
-	root->val |= value & VTD_PAGE_MASK;
-}
-
-struct context_entry;
-static inline struct context_entry *
-get_context_addr_from_root(struct root_entry *root)
-{
-	return (struct context_entry *)
-		(root_present(root)?phys_to_virt(
-		root->val & VTD_PAGE_MASK) :
-		NULL);
-}
-
-/*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: aval
- * 8-23: domain id
- */
-struct context_entry {
-	u64 lo;
-	u64 hi;
-};
-#define context_present(c) ((c).lo & 1)
-#define context_fault_disable(c) (((c).lo >> 1) & 1)
-#define context_translation_type(c) (((c).lo >> 2) & 3)
-#define context_address_root(c) ((c).lo & VTD_PAGE_MASK)
-#define context_address_width(c) ((c).hi & 7)
-#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
-
-#define context_set_present(c) do {(c).lo |= 1;} while (0)
-#define context_set_fault_enable(c) \
-	do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
-#define context_set_translation_type(c, val) \
-	do { \
-		(c).lo &= (((u64)-1) << 4) | 3; \
-		(c).lo |= ((val) & 3) << 2; \
-	} while (0)
-#define CONTEXT_TT_MULTI_LEVEL 0
-#define context_set_address_root(c, val) \
-	do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0)
-#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
-#define context_set_domain_id(c, val) \
-	do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
-#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
-
-/*
- * 0: readable
- * 1: writable
- * 2-6: reserved
- * 7: super page
- * 8-11: available
- * 12-63: Host physcial address
- */
-struct dma_pte {
-	u64 val;
-};
-#define dma_clear_pte(p)	do {(p).val = 0;} while (0)

 #define DMA_PTE_READ (1)
 #define DMA_PTE_WRITE (2)

-#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
-#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
-#define dma_set_pte_prot(p, prot) \
-		do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
-#define dma_set_pte_addr(p, addr) do {\
-		(p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
-#define dma_pte_present(p) (((p).val & 3) != 0)

 struct intel_iommu;
 struct dmar_domain;
 struct root_entry;

-struct dmar_domain {
-	int	id;			/* domain id */
-	struct intel_iommu *iommu;	/* back pointer to owning iommu */
-
-	struct list_head devices;	/* all devices' list */
-	struct iova_domain iovad;	/* iova's that belong to this domain */
-
-	struct dma_pte	*pgd;		/* virtual address */
-	spinlock_t	mapping_lock;	/* page table lock */
-	int		gaw;		/* max guest address width */
-
-	/* adjusted guest address width, 0 is level 2 30-bit */
-	int		agaw;
-#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
-	int		flags;
-};
-
-/* PCI domain-device relationship */
-struct device_domain_info {
-	struct list_head link;	/* link to domain siblings */
-	struct list_head global; /* link to global list */
-	u8 bus;			/* PCI bus numer */
-	u8 devfn;		/* PCI devfn number */
-	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
-	struct dmar_domain *domain; /* pointer to domain */
-};
-
-extern int init_dmars(void);
 extern void free_dmar_iommu(struct intel_iommu *iommu);
+extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+
+extern int dmar_disabled;

-#ifndef CONFIG_DMAR_GFX_WA
-static inline void iommu_prepare_gfx_mapping(void)
-{
-	return;
-}
-#endif /* !CONFIG_DMAR_GFX_WA */
-
 #endif
@@ -144,7 +144,6 @@ struct dmar_rmrr_unit {
 	list_for_each_entry(rmrr, &dmar_rmrr_units, list)
 /* Intel DMAR initialization functions */
 extern int intel_iommu_init(void);
-extern int dmar_disabled;
 #else
 static inline int intel_iommu_init(void)
 {
@@ -23,8 +23,6 @@
 #define _INTEL_IOMMU_H_

 #include <linux/types.h>
 #include <linux/msi.h>
-#include <linux/sysdev.h>
-#include <linux/iova.h>
 #include <linux/io.h>
 #include <linux/dma_remapping.h>
@@ -289,10 +287,10 @@ struct intel_iommu {
 	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
 	u64		cap;
 	u64		ecap;
-	int		seg;
 	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
 	spinlock_t	register_lock; /* protect register handling */
 	int		seq_id;	/* sequence id of the iommu */
+	int		agaw; /* agaw of this iommu */

 #ifdef CONFIG_DMAR
 	unsigned long	*domain_ids; /* bitmap of domains */
@@ -302,8 +300,6 @@ struct intel_iommu {
 	unsigned int irq;
 	unsigned char name[7];    /* Device Name */
-	struct msi_msg saved_msg;
-	struct sys_device sysdev;
 	struct iommu_flush flush;
 #endif
 	struct q_inval  *qi;            /* Queued invalidation info */
@@ -334,25 +330,6 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,

 extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

-void intel_iommu_domain_exit(struct dmar_domain *domain);
-struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev);
-int intel_iommu_context_mapping(struct dmar_domain *domain,
-				struct pci_dev *pdev);
-int intel_iommu_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
-			     u64 hpa, size_t size, int prot);
-void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn);
-struct dmar_domain *intel_iommu_find_domain(struct pci_dev *pdev);
-u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova);
-
-#ifdef CONFIG_DMAR
-int intel_iommu_found(void);
-#else /* CONFIG_DMAR */
-static inline int intel_iommu_found(void)
-{
-	return 0;
-}
-#endif /* CONFIG_DMAR */
-
 extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
 extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
 extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#define IOMMU_READ	(1)
#define IOMMU_WRITE	(2)

struct device;

struct iommu_domain {
	void *priv;
};

struct iommu_ops {
	int (*domain_init)(struct iommu_domain *domain);
	void (*domain_destroy)(struct iommu_domain *domain);
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	void (*unmap)(struct iommu_domain *domain, unsigned long iova,
		      size_t size);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    unsigned long iova);
};

#ifdef CONFIG_IOMMU_API

extern void register_iommu(struct iommu_ops *ops);
extern bool iommu_found(void);
extern struct iommu_domain *iommu_domain_alloc(void);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
			   phys_addr_t paddr, size_t size, int prot);
extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
				      unsigned long iova);

#else /* CONFIG_IOMMU_API */

static inline void register_iommu(struct iommu_ops *ops)
{
}

static inline bool iommu_found(void)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(void)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline int iommu_map_range(struct iommu_domain *domain,
				  unsigned long iova, phys_addr_t paddr,
				  size_t size, int prot)
{
	return -ENODEV;
}

static inline void iommu_unmap_range(struct iommu_domain *domain,
				     unsigned long iova, size_t size)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
					     unsigned long iova)
{
	return 0;
}

#endif /* CONFIG_IOMMU_API */

#endif /* __LINUX_IOMMU_H */
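Taken together, the consumer side of this API is small: probe for an IOMMU, allocate a domain, attach a device, map ranges. A usage sketch built only from the declarations above (error handling abbreviated; it mirrors what kvm_iommu_map_guest()/kvm_assign_device() do further down):

#include <linux/iommu.h>

static int example_attach_and_map(struct device *dev,
				  unsigned long iova, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	if (!iommu_found())
		return -ENODEV;

	domain = iommu_domain_alloc();
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* map one page read/write at the given IO virtual address */
	ret = iommu_map_range(domain, iova, paddr, PAGE_SIZE,
			      IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	return 0;

out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}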
@@ -316,6 +316,7 @@ struct kvm_assigned_dev_kernel {
 #define KVM_ASSIGNED_DEV_HOST_MSI	(1 << 9)
 	unsigned long irq_requested_type;
 	int irq_source_id;
+	int flags;
 	struct pci_dev *dev;
 	struct kvm *kvm;
 };
@@ -327,13 +328,16 @@ void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

-#ifdef CONFIG_DMAR
+#ifdef CONFIG_IOMMU_API
 int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
 			unsigned long npages);
-int kvm_iommu_map_guest(struct kvm *kvm,
-			struct kvm_assigned_dev_kernel *assigned_dev);
+int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
-#else /* CONFIG_DMAR */
+int kvm_assign_device(struct kvm *kvm,
+		      struct kvm_assigned_dev_kernel *assigned_dev);
+int kvm_deassign_device(struct kvm *kvm,
+			struct kvm_assigned_dev_kernel *assigned_dev);
+#else /* CONFIG_IOMMU_API */
 static inline int kvm_iommu_map_pages(struct kvm *kvm,
 				      gfn_t base_gfn,
 				      unsigned long npages)
@@ -341,9 +345,7 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm,
 	return 0;
 }

-static inline int kvm_iommu_map_guest(struct kvm *kvm,
-				      struct kvm_assigned_dev_kernel
-				      *assigned_dev)
+static inline int kvm_iommu_map_guest(struct kvm *kvm)
 {
 	return -ENODEV;
 }
@@ -352,7 +354,19 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
 {
 	return 0;
 }
-#endif /* CONFIG_DMAR */
+
+static inline int kvm_assign_device(struct kvm *kvm,
+		struct kvm_assigned_dev_kernel *assigned_dev)
+{
+	return 0;
+}
+
+static inline int kvm_deassign_device(struct kvm *kvm,
+		struct kvm_assigned_dev_kernel *assigned_dev)
+{
+	return 0;
+}
+#endif /* CONFIG_IOMMU_API */

 static inline void kvm_guest_enter(void)
 {
@@ -25,6 +25,7 @@
 #include <linux/kvm_host.h>
 #include <linux/pci.h>
 #include <linux/dmar.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>

 static int kvm_iommu_unmap_memslots(struct kvm *kvm);
@@ -37,7 +38,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 	gfn_t gfn = base_gfn;
 	pfn_t pfn;
 	int i, r = 0;
-	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
+	struct iommu_domain *domain = kvm->arch.iommu_domain;

 	/* check if iommu exists and in use */
 	if (!domain)
@@ -45,20 +46,17 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 	for (i = 0; i < npages; i++) {
 		/* check if already mapped */
-		pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
-						     gfn_to_gpa(gfn));
-		if (pfn)
+		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
 			continue;

 		pfn = gfn_to_pfn(kvm, gfn);
-		r = intel_iommu_page_mapping(domain,
+		r = iommu_map_range(domain,
 				    gfn_to_gpa(gfn),
 				    pfn_to_hpa(pfn),
 				    PAGE_SIZE,
-				    DMA_PTE_READ |
-				    DMA_PTE_WRITE);
+				    IOMMU_READ | IOMMU_WRITE);
 		if (r) {
-			printk(KERN_ERR "kvm_iommu_map_pages:"
+			printk(KERN_ERR "kvm_iommu_map_address:"
 			       "iommu failed to map pfn=%lx\n", pfn);
 			goto unmap_pages;
 		}
@@ -73,7 +71,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 static int kvm_iommu_map_memslots(struct kvm *kvm)
 {
-	int i, r;
+	int i, r = 0;

 	down_read(&kvm->slots_lock);
 	for (i = 0; i < kvm->nmemslots; i++) {
@@ -86,50 +84,79 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
 	return r;
 }

-int kvm_iommu_map_guest(struct kvm *kvm,
+int kvm_assign_device(struct kvm *kvm,
 		      struct kvm_assigned_dev_kernel *assigned_dev)
 {
 	struct pci_dev *pdev = NULL;
+	struct iommu_domain *domain = kvm->arch.iommu_domain;
 	int r;

-	if (!intel_iommu_found()) {
-		printk(KERN_ERR "%s: intel iommu not found\n", __func__);
+	/* check if iommu exists and in use */
+	if (!domain)
+		return 0;
+
 	pdev = assigned_dev->dev;
+	if (pdev == NULL)
+		return -ENODEV;
+
+	r = iommu_attach_device(domain, &pdev->dev);
+	if (r) {
+		printk(KERN_ERR "assign device %x:%x.%x failed",
+			pdev->bus->number,
+			PCI_SLOT(pdev->devfn),
+			PCI_FUNC(pdev->devfn));
+		return r;
+	}
+
-	printk(KERN_DEBUG "VT-d direct map: host bdf = %x:%x:%x\n",
+	printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n",
 		assigned_dev->host_busnr,
 		PCI_SLOT(assigned_dev->host_devfn),
 		PCI_FUNC(assigned_dev->host_devfn));

 	return 0;
 }

+int kvm_deassign_device(struct kvm *kvm,
+			struct kvm_assigned_dev_kernel *assigned_dev)
+{
+	struct iommu_domain *domain = kvm->arch.iommu_domain;
+	struct pci_dev *pdev = NULL;
+
+	/* check if iommu exists and in use */
+	if (!domain)
+		return 0;
+
+	pdev = assigned_dev->dev;
+	if (pdev == NULL)
+		return -ENODEV;
+
-	if (pdev == NULL) {
-		if (kvm->arch.intel_iommu_domain) {
-			intel_iommu_domain_exit(kvm->arch.intel_iommu_domain);
-			kvm->arch.intel_iommu_domain = NULL;
-		}
+	iommu_detach_device(domain, &pdev->dev);
+
+	printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n",
+		assigned_dev->host_busnr,
+		PCI_SLOT(assigned_dev->host_devfn),
+		PCI_FUNC(assigned_dev->host_devfn));

 	return 0;
 }

+int kvm_iommu_map_guest(struct kvm *kvm)
 {
 	int r;

+	if (!iommu_found()) {
+		printk(KERN_ERR "%s: iommu not found\n", __func__);
 		return -ENODEV;
 	}

-	kvm->arch.intel_iommu_domain = intel_iommu_domain_alloc(pdev);
-	if (!kvm->arch.intel_iommu_domain)
-		return -ENODEV;
+	kvm->arch.iommu_domain = iommu_domain_alloc();
+	if (!kvm->arch.iommu_domain)
+		return -ENOMEM;

 	r = kvm_iommu_map_memslots(kvm);
 	if (r)
 		goto out_unmap;

-	intel_iommu_detach_dev(kvm->arch.intel_iommu_domain,
-			       pdev->bus->number, pdev->devfn);
-
-	r = intel_iommu_context_mapping(kvm->arch.intel_iommu_domain,
-					pdev);
-	if (r) {
-		printk(KERN_ERR "Domain context map for %s failed",
-			pci_name(pdev));
-		goto out_unmap;
-	}
-
 	return 0;

 out_unmap:
@@ -142,15 +169,22 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 {
 	gfn_t gfn = base_gfn;
 	pfn_t pfn;
-	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
-	int i;
+	struct iommu_domain *domain = kvm->arch.iommu_domain;
+	unsigned long i;
+	u64 phys;

 	/* check if iommu exists and in use */
 	if (!domain)
 		return;

 	for (i = 0; i < npages; i++) {
-		pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
-						     gfn_to_gpa(gfn));
+		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
+		pfn = phys >> PAGE_SHIFT;
 		kvm_release_pfn_clean(pfn);
 		gfn++;
 	}
+
+	iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages);
 }

 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
@@ -168,24 +202,13 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 int kvm_iommu_unmap_guest(struct kvm *kvm)
 {
-	struct kvm_assigned_dev_kernel *entry;
-	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
+	struct iommu_domain *domain = kvm->arch.iommu_domain;

 	/* check if iommu exists and in use */
 	if (!domain)
 		return 0;

-	list_for_each_entry(entry, &kvm->arch.assigned_dev_head, list) {
-		printk(KERN_DEBUG "VT-d unmap: host bdf = %x:%x:%x\n",
-			entry->host_busnr,
-			PCI_SLOT(entry->host_devfn),
-			PCI_FUNC(entry->host_devfn));
-
-		/* detach kvm dmar domain */
-		intel_iommu_detach_dev(domain, entry->host_busnr,
-				       entry->host_devfn);
-	}
 	kvm_iommu_unmap_memslots(kvm);
-	intel_iommu_domain_exit(domain);
+	iommu_domain_free(domain);
 	return 0;
 }
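One detail worth spelling out in the hunks above: gfn_to_gpa() and pfn_to_hpa() are plain page shifts, which is why iommu_iova_to_phys() can be fed gfn_to_gpa(gfn) directly as the IO virtual address and why kvm_iommu_put_pages() recovers the pfn with phys >> PAGE_SHIFT. Simplified equivalents (kvm_host.h defines these with the proper gpa_t/hpa_t types):

/* simplified forms of the kvm_host.h helpers used above */
static inline u64 example_gfn_to_gpa(u64 gfn)
{
	return gfn << PAGE_SHIFT;	/* guest frame number -> guest physical */
}

static inline u64 example_pfn_to_hpa(u64 pfn)
{
	return pfn << PAGE_SHIFT;	/* host frame number -> host physical */
}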
@@ -496,6 +496,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	match->assigned_dev_id = assigned_dev->assigned_dev_id;
 	match->host_busnr = assigned_dev->busnr;
 	match->host_devfn = assigned_dev->devfn;
+	match->flags = assigned_dev->flags;
 	match->dev = dev;
 	match->irq_source_id = -1;
 	match->kvm = kvm;
@@ -503,7 +504,12 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	list_add(&match->list, &kvm->arch.assigned_dev_head);

 	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
-		r = kvm_iommu_map_guest(kvm, match);
+		if (!kvm->arch.iommu_domain) {
+			r = kvm_iommu_map_guest(kvm);
+			if (r)
+				goto out_list_del;
+		}
+		r = kvm_assign_device(kvm, match);
 		if (r)
 			goto out_list_del;
 	}
@@ -525,6 +531,35 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 }
 #endif

+#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
+static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
+		struct kvm_assigned_pci_dev *assigned_dev)
+{
+	int r = 0;
+	struct kvm_assigned_dev_kernel *match;
+
+	mutex_lock(&kvm->lock);
+
+	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+				      assigned_dev->assigned_dev_id);
+	if (!match) {
+		printk(KERN_INFO "%s: device hasn't been assigned before, "
+		       "so cannot be deassigned\n", __func__);
+		r = -EINVAL;
+		goto out;
+	}
+
+	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
+		kvm_deassign_device(kvm, match);
+
+	kvm_free_assigned_device(kvm, match);
+
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+#endif
+
 static inline int valid_vcpu(int n)
 {
 	return likely(n >= 0 && n < KVM_MAX_VCPUS);
@@ -1857,6 +1892,19 @@ static long kvm_vm_ioctl(struct file *filp,
 			goto out;
 		break;
 	}
 #endif
+#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
+	case KVM_DEASSIGN_PCI_DEVICE: {
+		struct kvm_assigned_pci_dev assigned_dev;
+
+		r = -EFAULT;
+		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
+			goto out;
+		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
+		if (r)
+			goto out;
+		break;
+	}
+#endif
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
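From userspace, the new ioctl is driven with the same struct kvm_assigned_pci_dev used for assignment, as the hunk above shows. A hypothetical snippet sketching the call (vm_fd is an open VM file descriptor; field and flag names are those referenced in the diff, the helper itself is illustrative):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_deassign(int vm_fd, unsigned int dev_id)
{
	struct kvm_assigned_pci_dev dev = {
		.assigned_dev_id = dev_id,
		.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU,
	};

	/* detaches the device from the VM's IOMMU domain, then frees it */
	return ioctl(vm_fd, KVM_DEASSIGN_PCI_DEVICE, &dev);
}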