Commit 3e2a590a authored by Linus Torvalds

Merge tag 'iommu-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull iommu fixes from Will Deacon:
 "This is mainly all Intel VT-D stuff, but there are some fixes for AMD
  and ARM as well.

  We've also got the revert I promised during the merge window, which
  removes a temporary hack to accommodate i915 while we transitioned the
  Intel IOMMU driver over to the common DMA-IOMMU API.

  Finally, there are still a couple of other VT-D fixes floating around,
  so I expect to send you another batch of fixes next week.

  Summary:

   - Fix VT-D TLB invalidation for subdevices

   - Fix VT-D use-after-free on subdevice detach

   - Fix VT-D locking so that IRQs are disabled during SVA bind/unbind

   - Fix VT-D address alignment when flushing IOTLB

   - Fix memory leak in VT-D IRQ remapping failure path

   - Revert temporary i915 sglist hack now that it is no longer required

   - Fix sporadic boot failure with Arm SMMU on Qualcomm SM8150

   - Fix NULL dereference in AMD IRQ remapping code with remapping disabled

   - Fix accidental enabling of irqs on AMD resume-from-suspend path

   - Fix some typos in comments"

* tag 'iommu-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  iommu/vt-d: Fix ineffective devTLB invalidation for subdevices
  iommu/vt-d: Fix general protection fault in aux_detach_device()
  iommu/vt-d: Move intel_iommu info from struct intel_svm to struct intel_svm_dev
  iommu/arm-smmu-qcom: Initialize SCTLR of the bypass context
  iommu/vt-d: Fix lockdep splat in sva bind()/unbind()
  Revert "iommu: Add quirk for Intel graphic devices in map_sg"
  iommu/vt-d: Fix misuse of ALIGN in qi_flush_piotlb()
  iommu/amd: Stop irq_remapping_select() matching when remapping is disabled
  iommu/amd: Set iommu->int_enabled consistently when interrupts are set up
  iommu/intel: Fix memleak in intel_irq_remapping_alloc
  iommu/iova: fix 'domain' typos
parents 95f05058 7c29ada5
@@ -1973,8 +1973,6 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 		return r;
 	}
 
-	iommu->int_enabled = true;
-
 	return 0;
 }
@@ -2169,6 +2167,7 @@ static int iommu_init_irq(struct amd_iommu *iommu)
 	if (ret)
 		return ret;
 
+	iommu->int_enabled = true;
 enable_faults:
 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
...
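Note on the int_enabled change above: the flag used to be set only in the MSI-specific helper, so its value after setup depended on which interrupt mode the IOMMU used. Moving it into iommu_init_irq() records "interrupts are set up" once, for every mode, which is what the resume path checks. A minimal sketch of the pattern, with hypothetical names:

#include <stdbool.h>

struct example_iommu {
	bool use_msi;
	bool int_enabled;
};

static int setup_msi_mode(struct example_iommu *iommu)
{
	/* mode-specific setup only; the flag is no longer set here */
	return 0;
}

static int setup_intcapxt_mode(struct example_iommu *iommu)
{
	return 0;
}

static int init_irq(struct example_iommu *iommu)
{
	int ret = iommu->use_msi ? setup_msi_mode(iommu)
				 : setup_intcapxt_mode(iommu);

	if (ret)
		return ret;

	iommu->int_enabled = true;	/* set once, for every mode */
	return 0;
}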
@@ -3854,6 +3854,9 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
 	struct amd_iommu *iommu;
 	int devid = -1;
 
+	if (!amd_iommu_irq_remap)
+		return 0;
+
 	if (x86_fwspec_is_ioapic(fwspec))
 		devid = get_ioapic_devid(fwspec->param[0]);
 	else if (x86_fwspec_is_hpet(fwspec))
...
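The two added lines avoid the NULL dereference when interrupt remapping is disabled: returning 0 from an irq_domain .select() callback means "no match", so the core falls back to other domains instead of asking this one to translate fwspecs against remapping state that was never initialized. A minimal sketch of the guard pattern (hypothetical flag and simplified matching, not the AMD driver itself):

#include <linux/irqdomain.h>

static bool example_remap_enabled;	/* hypothetical module state */

static int example_select(struct irq_domain *d, struct irq_fwspec *fwspec,
			  enum irq_domain_bus_token bus_token)
{
	/* Feature is off: report "no match" rather than touching
	 * per-device tables that were never allocated. */
	if (!example_remap_enabled)
		return 0;

	return d->fwnode == fwspec->fwnode;	/* simplified match */
}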
@@ -196,6 +196,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
 	set_bit(qsmmu->bypass_cbndx, smmu->context_map);
 
+	arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
+
 	reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
 	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
 }
...
@@ -863,33 +863,6 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
 	int i, count = 0;
 
-	/*
-	 * The Intel graphic driver is used to assume that the returned
-	 * sg list is not combound. This blocks the efforts of converting
-	 * Intel IOMMU driver to dma-iommu api's. Add this quirk to make the
-	 * device driver work and should be removed once it's fixed in i915
-	 * driver.
-	 */
-	if (IS_ENABLED(CONFIG_DRM_I915) && dev_is_pci(dev) &&
-	    to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL &&
-	    (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
-		for_each_sg(sg, s, nents, i) {
-			unsigned int s_iova_off = sg_dma_address(s);
-			unsigned int s_length = sg_dma_len(s);
-			unsigned int s_iova_len = s->length;
-
-			s->offset += s_iova_off;
-			s->length = s_length;
-			sg_dma_address(s) = dma_addr + s_iova_off;
-			sg_dma_len(s) = s_length;
-
-			dma_addr += s_iova_len;
-			pr_info_once("sg combining disabled due to i915 driver\n");
-		}
-
-		return nents;
-	}
-
 	for_each_sg(sg, s, nents, i) {
 		/* Restore this segment's original unaligned fields first */
 		unsigned int s_iova_off = sg_dma_address(s);
...
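For context on the revert: the quirk existed because i915 assumed one DMA segment per scatterlist entry, while dma_map_sg() is allowed to coalesce entries. The contract callers must follow is to honour the returned segment count and the DMA-side accessors. A minimal sketch of that contract (hypothetical wrapper function, standard DMA API calls):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void example_map(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (count <= 0)
		return;

	/* Iterate over the mapped segments (count), not the original
	 * nents: the two may differ when segments were combined. */
	for_each_sg(sgl, sg, count, i)
		pr_info("seg %d: dma %pad len %u\n",
			i, &sg_dma_address(sg), sg_dma_len(sg));
}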
@@ -1461,8 +1461,8 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
 		int mask = ilog2(__roundup_pow_of_two(npages));
 		unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
 
-		if (WARN_ON_ONCE(!ALIGN(addr, align)))
-			addr &= ~(align - 1);
+		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+			addr = ALIGN_DOWN(addr, align);
 
 		desc.qw0 = QI_EIOTLB_PASID(pasid) |
 			   QI_EIOTLB_DID(did) |
...
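The old check never fired: ALIGN() rounds up, so ALIGN(addr, align) is non-zero for any non-zero addr, and for addr == 0 the address is aligned anyway. IS_ALIGNED() tests the low bits, which is what was intended. A minimal user-space sketch, with the macros mirroring their kernel definitions:

#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	uint64_t addr = 0x1234;		/* not 4 KiB aligned */
	uint64_t align = 0x1000;

	/* Old condition: ALIGN() rounds up to 0x2000, which is non-zero,
	 * so !ALIGN(...) is 0 and the WARN_ON_ONCE never triggered. */
	printf("!ALIGN -> %d\n", !ALIGN(addr, align));

	/* New condition: the low bits are set, so this warns and the
	 * address gets truncated down to 0x1000 as intended. */
	printf("!IS_ALIGNED -> %d\n", !IS_ALIGNED(addr, align));
	printf("ALIGN_DOWN -> 0x%llx\n",
	       (unsigned long long)ALIGN_DOWN(addr, align));
	return 0;
}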
@@ -719,6 +719,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
 	return nid;
 }
 
+static void domain_update_iotlb(struct dmar_domain *domain);
+
 /* Some capabilities may be different across iommus */
 static void domain_update_iommu_cap(struct dmar_domain *domain)
 {
@@ -744,6 +746,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
 		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
 	else
 		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+
+	domain_update_iotlb(domain);
 }
 
 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -1464,17 +1468,22 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 
 	assert_spin_locked(&device_domain_lock);
 
-	list_for_each_entry(info, &domain->devices, link) {
-		struct pci_dev *pdev;
-
-		if (!info->dev || !dev_is_pci(info->dev))
-			continue;
-
-		pdev = to_pci_dev(info->dev);
-		if (pdev->ats_enabled) {
+	list_for_each_entry(info, &domain->devices, link)
+		if (info->ats_enabled) {
 			has_iotlb_device = true;
 			break;
 		}
+
+	if (!has_iotlb_device) {
+		struct subdev_domain_info *sinfo;
+
+		list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+			info = get_domain_info(sinfo->pdev);
+			if (info && info->ats_enabled) {
+				has_iotlb_device = true;
+				break;
+			}
+		}
+	}
 
 	domain->has_iotlb_device = has_iotlb_device;
@@ -1555,25 +1564,37 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 #endif
 }
 
+static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
+				    u64 addr, unsigned int mask)
+{
+	u16 sid, qdep;
+
+	if (!info || !info->ats_enabled)
+		return;
+
+	sid = info->bus << 8 | info->devfn;
+	qdep = info->ats_qdep;
+	qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+			   qdep, addr, mask);
+}
+
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 				  u64 addr, unsigned mask)
 {
-	u16 sid, qdep;
 	unsigned long flags;
 	struct device_domain_info *info;
+	struct subdev_domain_info *sinfo;
 
 	if (!domain->has_iotlb_device)
 		return;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
-	list_for_each_entry(info, &domain->devices, link) {
-		if (!info->ats_enabled)
-			continue;
+	list_for_each_entry(info, &domain->devices, link)
+		__iommu_flush_dev_iotlb(info, addr, mask);
 
-		sid = info->bus << 8 | info->devfn;
-		qdep = info->ats_qdep;
-		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
-				qdep, addr, mask);
+	list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+		info = get_domain_info(sinfo->pdev);
+		__iommu_flush_dev_iotlb(info, addr, mask);
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
@@ -1877,6 +1898,7 @@ static struct dmar_domain *alloc_domain(int flags)
 		domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
 	domain->has_iotlb_device = false;
 	INIT_LIST_HEAD(&domain->devices);
+	INIT_LIST_HEAD(&domain->subdevices);
 
 	return domain;
 }
@@ -2547,7 +2569,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 	info->iommu = iommu;
 	info->pasid_table = NULL;
 	info->auxd_enabled = 0;
-	INIT_LIST_HEAD(&info->auxiliary_domains);
+	INIT_LIST_HEAD(&info->subdevices);
 
 	if (dev && dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -4475,33 +4497,61 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
 			domain->type == IOMMU_DOMAIN_UNMANAGED;
 }
 
-static void auxiliary_link_device(struct dmar_domain *domain,
-				  struct device *dev)
+static inline struct subdev_domain_info *
+lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
+{
+	struct subdev_domain_info *sinfo;
+
+	if (!list_empty(&domain->subdevices)) {
+		list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+			if (sinfo->pdev == dev)
+				return sinfo;
+		}
+	}
+
+	return NULL;
+}
+
+static int auxiliary_link_device(struct dmar_domain *domain,
+				 struct device *dev)
 {
 	struct device_domain_info *info = get_domain_info(dev);
+	struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
 
 	assert_spin_locked(&device_domain_lock);
 	if (WARN_ON(!info))
-		return;
+		return -EINVAL;
+
+	if (!sinfo) {
+		sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+		sinfo->domain = domain;
+		sinfo->pdev = dev;
+		list_add(&sinfo->link_phys, &info->subdevices);
+		list_add(&sinfo->link_domain, &domain->subdevices);
+	}
 
-	domain->auxd_refcnt++;
-	list_add(&domain->auxd, &info->auxiliary_domains);
+	return ++sinfo->users;
 }
 
-static void auxiliary_unlink_device(struct dmar_domain *domain,
-				    struct device *dev)
+static int auxiliary_unlink_device(struct dmar_domain *domain,
+				   struct device *dev)
 {
 	struct device_domain_info *info = get_domain_info(dev);
+	struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
+	int ret;
 
 	assert_spin_locked(&device_domain_lock);
-	if (WARN_ON(!info))
-		return;
+	if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
+		return -EINVAL;
 
-	list_del(&domain->auxd);
-	domain->auxd_refcnt--;
+	ret = --sinfo->users;
+	if (!ret) {
+		list_del(&sinfo->link_phys);
+		list_del(&sinfo->link_domain);
+		kfree(sinfo);
+	}
 
-	if (!domain->auxd_refcnt && domain->default_pasid > 0)
-		ioasid_put(domain->default_pasid);
+	return ret;
 }
 
 static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -4530,6 +4580,19 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
 	}
 
 	spin_lock_irqsave(&device_domain_lock, flags);
+	ret = auxiliary_link_device(domain, dev);
+	if (ret <= 0)
+		goto link_failed;
+
+	/*
+	 * Subdevices from the same physical device can be attached to the
+	 * same domain. For such cases, only the first subdevice attachment
+	 * needs to go through the full steps in this function. So if ret >
+	 * 1, just goto out.
+	 */
+	if (ret > 1)
+		goto out;
+
 	/*
 	 * iommu->lock must be held to attach domain to iommu and setup the
 	 * pasid entry for second level translation.
@@ -4548,10 +4611,9 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
 					     domain->default_pasid);
 	if (ret)
 		goto table_failed;
-	spin_unlock(&iommu->lock);
 
-	auxiliary_link_device(domain, dev);
-
+	spin_unlock(&iommu->lock);
+out:
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 
 	return 0;
@@ -4560,8 +4622,10 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
 	domain_detach_iommu(domain, iommu);
 attach_failed:
 	spin_unlock(&iommu->lock);
+	auxiliary_unlink_device(domain, dev);
+link_failed:
 	spin_unlock_irqrestore(&device_domain_lock, flags);
-	if (!domain->auxd_refcnt && domain->default_pasid > 0)
+	if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
 		ioasid_put(domain->default_pasid);
 
 	return ret;
@@ -4581,14 +4645,18 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
 	info = get_domain_info(dev);
 	iommu = info->iommu;
 
-	auxiliary_unlink_device(domain, dev);
-
-	spin_lock(&iommu->lock);
-	intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
-	domain_detach_iommu(domain, iommu);
-	spin_unlock(&iommu->lock);
+	if (!auxiliary_unlink_device(domain, dev)) {
+		spin_lock(&iommu->lock);
+		intel_pasid_tear_down_entry(iommu, dev,
+					    domain->default_pasid, false);
+		domain_detach_iommu(domain, iommu);
+		spin_unlock(&iommu->lock);
+	}
 
 	spin_unlock_irqrestore(&device_domain_lock, flags);
+
+	if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
+		ioasid_put(domain->default_pasid);
 }
 
 static int prepare_domain_attach_device(struct iommu_domain *domain,
...
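The refcounting scheme above makes aux-domain attach and detach symmetric per (domain, device) pair: linking returns the new user count, unlinking returns the remaining count, and only the first link and the last unlink do the heavy lifting (PASID setup and teardown). A condensed user-space sketch of the same counting pattern, with hypothetical names and no locking:

#include <stdlib.h>

struct subdev {
	void *dev;
	int users;
	struct subdev *next;
};

static struct subdev *subdev_list;

static struct subdev *lookup(void *dev)
{
	struct subdev *s;

	for (s = subdev_list; s; s = s->next)
		if (s->dev == dev)
			return s;
	return NULL;
}

/* Returns the new user count (>= 1), allocating on first use. */
static int subdev_link(void *dev)
{
	struct subdev *s = lookup(dev);

	if (!s) {
		s = calloc(1, sizeof(*s));
		if (!s)
			return -1;
		s->dev = dev;
		s->next = subdev_list;
		subdev_list = s;
	}
	return ++s->users;
}

/* Returns the remaining count; 0 means the caller should tear down. */
static int subdev_unlink(void *dev)
{
	struct subdev *s = lookup(dev), **p;

	if (!s || s->users <= 0)
		return -1;

	if (--s->users == 0) {
		for (p = &subdev_list; *p != s; p = &(*p)->next)
			;
		*p = s->next;
		free(s);
		return 0;
	}
	return s->users;
}

int main(void)
{
	int dev;

	subdev_link(&dev);		/* 1: first user, full setup */
	subdev_link(&dev);		/* 2: already linked, setup skipped */
	subdev_unlink(&dev);		/* 1: still in use */
	return subdev_unlink(&dev);	/* 0: last user, tear down */
}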
@@ -1353,6 +1353,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
 		irq_data = irq_domain_get_irq_data(domain, virq + i);
 		irq_cfg = irqd_cfg(irq_data);
 		if (!irq_data || !irq_cfg) {
+			if (!i)
+				kfree(data);
 			ret = -EINVAL;
 			goto out_free_data;
 		}
...
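The leak fixed above: the buffer allocated once before the loop only becomes reachable from the out_free_data cleanup path after it has been attached to the first descriptor, so a failure on the very first iteration left it owned by nobody. A condensed user-space sketch of the same ownership pattern, with hypothetical names:

#include <stdlib.h>

#define NR_SLOTS 4
static void *slots[NR_SLOTS];

static int fill_slots(int nr, int fail_at)
{
	void *data = malloc(64);
	int i;

	for (i = 0; i < nr; i++) {
		if (i == fail_at) {
			/* On the first iteration 'data' has not been
			 * stored anywhere, so the cleanup loop below
			 * would never free it: free it explicitly. */
			if (!i)
				free(data);
			goto out_free;
		}
		/* slot 0 takes ownership of 'data'; later slots get copies */
		slots[i] = i ? malloc(64) : data;
	}
	return 0;

out_free:
	while (--i >= 0) {
		free(slots[i]);		/* freeing slot 0 frees 'data' */
		slots[i] = NULL;
	}
	return -1;
}

int main(void)
{
	return fill_slots(NR_SLOTS, 0);	/* fails at i == 0: no leak */
}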
@@ -142,7 +142,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 	}
 	desc.qw2 = 0;
 	desc.qw3 = 0;
-	qi_submit_sync(svm->iommu, &desc, 1, 0);
+	qi_submit_sync(sdev->iommu, &desc, 1, 0);
 
 	if (sdev->dev_iotlb) {
 		desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -166,7 +166,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 		}
 		desc.qw2 = 0;
 		desc.qw3 = 0;
-	qi_submit_sync(svm->iommu, &desc, 1, 0);
+	qi_submit_sync(sdev->iommu, &desc, 1, 0);
 	}
 }
@@ -211,7 +211,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdev, &svm->devs, list)
-		intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+		intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
 					    svm->pasid, true);
 	rcu_read_unlock();
@@ -281,6 +281,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
 	struct dmar_domain *dmar_domain;
 	struct device_domain_info *info;
 	struct intel_svm *svm = NULL;
+	unsigned long iflags;
 	int ret = 0;
 
 	if (WARN_ON(!iommu) || !data)
@@ -363,6 +364,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
 		}
 		sdev->dev = dev;
 		sdev->sid = PCI_DEVID(info->bus, info->devfn);
+		sdev->iommu = iommu;
 
 		/* Only count users if device has aux domains */
 		if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -381,12 +383,12 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
 	 * each bind of a new device even with an existing PASID, we need to
 	 * call the nested mode setup function here.
 	 */
-	spin_lock(&iommu->lock);
+	spin_lock_irqsave(&iommu->lock, iflags);
 	ret = intel_pasid_setup_nested(iommu, dev,
 				       (pgd_t *)(uintptr_t)data->gpgd,
 				       data->hpasid, &data->vendor.vtd, dmar_domain,
 				       data->addr_width);
-	spin_unlock(&iommu->lock);
+	spin_unlock_irqrestore(&iommu->lock, iflags);
 	if (ret) {
 		dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
 				    data->hpasid, ret);
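The irqsave conversions in this file address the lockdep splat: iommu->lock is also taken in interrupt context, so process-context users must disable local interrupts while holding it, otherwise an interrupt arriving on the same CPU while the lock is held can deadlock. The generic pattern, sketched outside the driver:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void process_context_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* IRQs off while held */
	/* ... touch state shared with the interrupt handler ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	/* already in hard-IRQ context, so the plain lock is fine here */
	spin_lock(&example_lock);
	/* ... */
	spin_unlock(&example_lock);
	return IRQ_HANDLED;
}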
@@ -486,6 +488,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
 	struct device_domain_info *info;
 	struct intel_svm_dev *sdev;
 	struct intel_svm *svm = NULL;
+	unsigned long iflags;
 	int pasid_max;
 	int ret;
@@ -546,6 +549,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
 		goto out;
 	}
 	sdev->dev = dev;
+	sdev->iommu = iommu;
 
 	ret = intel_iommu_enable_pasid(iommu, dev);
 	if (ret) {
@@ -575,7 +579,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
 			kfree(sdev);
 			goto out;
 		}
-		svm->iommu = iommu;
 
 		if (pasid_max > intel_pasid_max_id)
 			pasid_max = intel_pasid_max_id;
@@ -605,14 +608,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
 			}
 		}
 
-		spin_lock(&iommu->lock);
+		spin_lock_irqsave(&iommu->lock, iflags);
 		ret = intel_pasid_setup_first_level(iommu, dev,
 				mm ? mm->pgd : init_mm.pgd,
 				svm->pasid, FLPT_DEFAULT_DID,
 				(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
 				(cpu_feature_enabled(X86_FEATURE_LA57) ?
 				 PASID_FLAG_FL5LP : 0));
-		spin_unlock(&iommu->lock);
+		spin_unlock_irqrestore(&iommu->lock, iflags);
 		if (ret) {
 			if (mm)
 				mmu_notifier_unregister(&svm->notifier, mm);
@@ -632,14 +635,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
 		 * Binding a new device with existing PASID, need to setup
 		 * the PASID entry.
 		 */
-		spin_lock(&iommu->lock);
+		spin_lock_irqsave(&iommu->lock, iflags);
 		ret = intel_pasid_setup_first_level(iommu, dev,
 						mm ? mm->pgd : init_mm.pgd,
 						svm->pasid, FLPT_DEFAULT_DID,
 						(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
 						(cpu_feature_enabled(X86_FEATURE_LA57) ?
 						 PASID_FLAG_FL5LP : 0));
-		spin_unlock(&iommu->lock);
+		spin_unlock_irqrestore(&iommu->lock, iflags);
 		if (ret) {
 			kfree(sdev);
 			goto out;
...
@@ -358,7 +358,7 @@ static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
  * @iovad: - iova domain in question.
  * @pfn: - page frame number
  * This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
+ * given domain which matches the given pfn.
  */
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
 {
@@ -601,7 +601,7 @@ void queue_iova(struct iova_domain *iovad,
 EXPORT_SYMBOL_GPL(queue_iova);
 
 /**
- * put_iova_domain - destroys the iova doamin
+ * put_iova_domain - destroys the iova domain
  * @iovad: - iova domain in question.
  * All the iova's in that domain are destroyed.
  */
@@ -712,9 +712,9 @@ EXPORT_SYMBOL_GPL(reserve_iova);
 
 /**
  * copy_reserved_iova - copies the reserved between domains
- * @from: - source doamin from where to copy
+ * @from: - source domain from where to copy
  * @to: - destination domin where to copy
- * This function copies reserved iova's from one doamin to
+ * This function copies reserved iova's from one domain to
  * other.
  */
 void
...
@@ -533,11 +533,10 @@ struct dmar_domain {
 					/* Domain ids per IOMMU. Use u16 since
 					 * domain ids are 16 bit wide according
 					 * to VT-d spec, section 9.3 */
-	unsigned int	auxd_refcnt;	/* Refcount of auxiliary attaching */
-
 	bool has_iotlb_device;
 	struct list_head devices;	/* all devices' list */
-	struct list_head auxd;		/* link to device's auxiliary list */
+	struct list_head subdevices;	/* all subdevices' list */
+
 	struct iova_domain iovad;	/* iova's that belong to this domain */
 
 	struct dma_pte	*pgd;		/* virtual address */
@@ -610,14 +609,21 @@ struct intel_iommu {
 	struct dmar_drhd_unit *drhd;
 };
 
+/* Per subdevice private data */
+struct subdev_domain_info {
+	struct list_head link_phys;	/* link to phys device siblings */
+	struct list_head link_domain;	/* link to domain siblings */
+	struct device *pdev;		/* physical device derived from */
+	struct dmar_domain *domain;	/* aux-domain */
+	int users;			/* user count */
+};
+
 /* PCI domain-device relationship */
 struct device_domain_info {
 	struct list_head link;	/* link to domain siblings */
 	struct list_head global; /* link to global list */
 	struct list_head table;	/* link to pasid table */
-	struct list_head auxiliary_domains; /* auxiliary domains
-					     * attached to this device
-					     */
+	struct list_head subdevices; /* subdevices sibling */
 	u32 segment;		/* PCI segment number */
 	u8 bus;			/* PCI bus number */
 	u8 devfn;		/* PCI devfn number */
@@ -758,6 +764,7 @@ struct intel_svm_dev {
 	struct list_head list;
 	struct rcu_head rcu;
 	struct device *dev;
+	struct intel_iommu *iommu;
 	struct svm_dev_ops *ops;
 	struct iommu_sva sva;
 	u32 pasid;
@@ -771,7 +778,6 @@ struct intel_svm {
 	struct mmu_notifier notifier;
 	struct mm_struct *mm;
 
-	struct intel_iommu *iommu;
 	unsigned int flags;
 	u32 pasid;
 	int gpasid; /* In case that guest PASID is different from host PASID */
...