Commit 95587a75 authored by Lu Baolu, committed by Joerg Roedel

iommu/vt-d: Add per-device IOMMU feature ops entries

This adds the iommu ops entries for aux-domain per-device
feature query and enable/disable.
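
For context only (not part of this patch): below is a minimal sketch of how a
driver might exercise these ops through the generic per-device feature
interface added by the companion core patch in this series. The
iommu_dev_has_feature()/iommu_dev_enable_feature() wrapper names and the
example_enable_aux_domains() helper are assumptions for illustration, not
something this change introduces.

#include <linux/iommu.h>

/*
 * Illustrative sketch only; assumes the generic iommu_dev_*_feature()
 * wrappers from the companion core patch and a PASID-capable device that
 * exposes the Scalable IOV DVSEC. Hypothetical helper, not part of this
 * patch.
 */
static int example_enable_aux_domains(struct device *dev)
{
	int ret;

	/* Ask the IOMMU driver whether it can back aux domains for this device. */
	if (!iommu_dev_has_feature(dev, IOMMU_DEV_FEAT_AUX))
		return -ENODEV;

	/* Enable the feature; on VT-d this also enables PASID for the device. */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		return ret;

	/* Auxiliary domains can now be allocated and attached to this device. */
	return 0;
}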

Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Sanjay Kumar <sanjay.k.kumar@intel.com>
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent d7cbc0f3
@@ -2485,6 +2485,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 	info->domain = domain;
 	info->iommu = iommu;
 	info->pasid_table = NULL;
+	info->auxd_enabled = 0;
 
 	if (dev && dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -5223,6 +5224,42 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 	return phys;
 }
 
+static inline bool scalable_mode_support(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	bool ret = true;
+
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		if (!sm_supported(iommu)) {
+			ret = false;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+static inline bool iommu_pasid_support(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	bool ret = true;
+
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		if (!pasid_supported(iommu)) {
+			ret = false;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 static bool intel_iommu_capable(enum iommu_cap cap)
 {
 	if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5380,6 +5417,124 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 }
 #endif /* CONFIG_INTEL_IOMMU_SVM */
 
+static int intel_iommu_enable_auxd(struct device *dev)
+{
+	struct device_domain_info *info;
+	struct intel_iommu *iommu;
+	unsigned long flags;
+	u8 bus, devfn;
+	int ret;
+
+	iommu = device_to_iommu(dev, &bus, &devfn);
+	if (!iommu || dmar_disabled)
+		return -EINVAL;
+
+	if (!sm_supported(iommu) || !pasid_supported(iommu))
+		return -EINVAL;
+
+	ret = intel_iommu_enable_pasid(iommu, dev);
+	if (ret)
+		return -ENODEV;
+
+	spin_lock_irqsave(&device_domain_lock, flags);
+	info = dev->archdata.iommu;
+	info->auxd_enabled = 1;
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+
+	return 0;
+}
+
+static int intel_iommu_disable_auxd(struct device *dev)
+{
+	struct device_domain_info *info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&device_domain_lock, flags);
+	info = dev->archdata.iommu;
+	if (!WARN_ON(!info))
+		info->auxd_enabled = 0;
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+
+	return 0;
+}
+
+/*
+ * A PCI express designated vendor specific extended capability is defined
+ * in the section 3.7 of Intel scalable I/O virtualization technical spec
+ * for system software and tools to detect endpoint devices supporting the
+ * Intel scalable IO virtualization without host driver dependency.
+ *
+ * Returns the address of the matching extended capability structure within
+ * the device's PCI configuration space or 0 if the device does not support
+ * it.
+ */
+static int siov_find_pci_dvsec(struct pci_dev *pdev)
+{
+	int pos;
+	u16 vendor, id;
+
+	pos = pci_find_next_ext_capability(pdev, 0, 0x23);
+	while (pos) {
+		pci_read_config_word(pdev, pos + 4, &vendor);
+		pci_read_config_word(pdev, pos + 8, &id);
+		if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
+			return pos;
+
+		pos = pci_find_next_ext_capability(pdev, pos, 0x23);
+	}
+
+	return 0;
+}
+
+static bool
+intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
+{
+	if (feat == IOMMU_DEV_FEAT_AUX) {
+		int ret;
+
+		if (!dev_is_pci(dev) || dmar_disabled ||
+		    !scalable_mode_support() || !iommu_pasid_support())
+			return false;
+
+		ret = pci_pasid_features(to_pci_dev(dev));
+		if (ret < 0)
+			return false;
+
+		return !!siov_find_pci_dvsec(to_pci_dev(dev));
+	}
+
+	return false;
+}
+
+static int
+intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
+{
+	if (feat == IOMMU_DEV_FEAT_AUX)
+		return intel_iommu_enable_auxd(dev);
+
+	return -ENODEV;
+}
+
+static int
+intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
+{
+	if (feat == IOMMU_DEV_FEAT_AUX)
+		return intel_iommu_disable_auxd(dev);
+
+	return -ENODEV;
+}
+
+static bool
+intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+	struct device_domain_info *info = dev->archdata.iommu;
+
+	if (feat == IOMMU_DEV_FEAT_AUX)
+		return scalable_mode_support() && info && info->auxd_enabled;
+
+	return false;
+}
+
 const struct iommu_ops intel_iommu_ops = {
 	.capable = intel_iommu_capable,
 	.domain_alloc = intel_iommu_domain_alloc,
@@ -5394,6 +5549,10 @@ const struct iommu_ops intel_iommu_ops = {
 	.get_resv_regions = intel_iommu_get_resv_regions,
 	.put_resv_regions = intel_iommu_put_resv_regions,
 	.device_group = pci_device_group,
+	.dev_has_feat = intel_iommu_dev_has_feat,
+	.dev_feat_enabled = intel_iommu_dev_feat_enabled,
+	.dev_enable_feat = intel_iommu_dev_enable_feat,
+	.dev_disable_feat = intel_iommu_dev_disable_feat,
 	.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
 };
......
@@ -568,6 +568,7 @@ struct device_domain_info {
 	u8 pri_enabled:1;
 	u8 ats_supported:1;
 	u8 ats_enabled:1;
+	u8 auxd_enabled:1;	/* Multiple domains per device */
 	u8 ats_qdep;
 	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
 	struct intel_iommu *iommu; /* IOMMU used by this device */
......