Commit 105ecadc authored by Linus Torvalds

Merge git://git.infradead.org/intel-iommu

Pull IOMMU fixes from David Woodhouse:
 "Two minor fixes.

  The first fixes the assignment of SR-IOV virtual functions to the
  correct IOMMU unit, and the second fixes the excessively large (and
  physically contiguous) PASID tables used with SVM"

* git://git.infradead.org/intel-iommu:
  iommu/vt-d: Fix PASID table allocation
  iommu/vt-d: Fix IOMMU lookup for SR-IOV Virtual Functions
Parents: ff17bf8a 91017044
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -338,7 +338,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
 	struct pci_dev *pdev = to_pci_dev(data);
 	struct dmar_pci_notify_info *info;
 
-	/* Only care about add/remove events for physical functions */
+	/* Only care about add/remove events for physical functions.
+	 * For VFs we actually do the lookup based on the corresponding
+	 * PF in device_to_iommu() anyway. */
 	if (pdev->is_virtfn)
 		return NOTIFY_DONE;
 	if (action != BUS_NOTIFY_ADD_DEVICE &&
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -892,7 +892,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 		return NULL;
 
 	if (dev_is_pci(dev)) {
+		struct pci_dev *pf_pdev;
+
 		pdev = to_pci_dev(dev);
+		/* VFs aren't listed in scope tables; we need to look up
+		 * the PF instead to find the IOMMU. */
+		pf_pdev = pci_physfn(pdev);
+		dev = &pf_pdev->dev;
 		segment = pci_domain_nr(pdev->bus);
 	} else if (has_acpi_companion(dev))
 		dev = &ACPI_COMPANION(dev)->dev;
@@ -905,6 +911,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 		for_each_active_dev_scope(drhd->devices,
 					  drhd->devices_cnt, i, tmp) {
 			if (tmp == dev) {
+				/* For a VF use its original BDF# not that of the PF
+				 * which we used for the IOMMU lookup. Strictly speaking
+				 * we could do this for all PCI devices; we only need to
+				 * get the BDF# from the scope table for ACPI matches. */
+				if (pdev->is_virtfn)
+					goto got_pdev;
+
 				*bus = drhd->devices[i].bus;
 				*devfn = drhd->devices[i].devfn;
 				goto out;
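
For reference, the lookup above leans on pci_physfn(), a helper that already exists in include/linux/pci.h: it maps a VF back to the PF that created it and is a no-op for every other device. A simplified sketch (the real helper is additionally guarded by CONFIG_PCI_ATS):

    static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
    {
    	/* VFs carry a back-pointer to their parent PF; for PFs and
    	 * non-SR-IOV devices, just return the device itself. */
    	if (dev->is_virtfn)
    		dev = dev->physfn;
    	return dev;
    }

Since VFs are never listed in the DMAR scope tables, doing the scope walk with the PF's struct device finds the correct IOMMU, while the got_pdev path keeps reporting the VF's own bus/devfn for context-table programming.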
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
 	struct page *pages;
 	int order;
 
-	order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
-	if (order < 0)
-		order = 0;
-
+	/* Start at 2 because it's defined as 2^(1+PSS) */
+	iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
+
+	/* Eventually I'm promised we will get a multi-level PASID table
+	 * and it won't have to be physically contiguous. Until then,
+	 * limit the size because 8MiB contiguous allocations can be hard
+	 * to come by. The limit of 0x20000, which is 1MiB for each of
+	 * the PASID and PASID-state tables, is somewhat arbitrary. */
+	if (iommu->pasid_max > 0x20000)
+		iommu->pasid_max = 0x20000;
+
+	order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
 	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!pages) {
 		pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
 	pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);
 
 	if (ecap_dis(iommu->ecap)) {
+		/* Just making it explicit... */
+		BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
 		pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 		if (pages)
 			iommu->pasid_state_table = page_address(pages);
@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
 
 int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
 {
-	int order;
-
-	order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
-	if (order < 0)
-		order = 0;
+	int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
 
 	if (iommu->pasid_table) {
 		free_pages((unsigned long)iommu->pasid_table, order);
@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
 	}
 	svm->iommu = iommu;
 
-	if (pasid_max > 2 << ecap_pss(iommu->ecap))
-		pasid_max = 2 << ecap_pss(iommu->ecap);
+	if (pasid_max > iommu->pasid_max)
+		pasid_max = iommu->pasid_max;
 
 	/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
 	ret = idr_alloc(&iommu->pasid_idr, svm,
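
To put numbers on the "excessively large" tables: with PSS = 19 (20-bit PASIDs) and the 8-byte pasid_entry used here, the properly sized table is 2^(1+19) * 8 bytes = 8MiB of physically contiguous memory, the old off-by-three formula (ecap_pss + 7 - PAGE_SHIFT) requested eight times that, and the 0x20000-entry cap brings each table down to 1MiB. A standalone userspace sketch of the arithmetic, assuming 4KiB pages (order_for() merely mimics the kernel's get_order(); none of this is kernel code):

    #include <stdio.h>

    /* Mimic get_order() for a 4KiB PAGE_SIZE: smallest order such
     * that 2^order pages cover the requested number of bytes. */
    static int order_for(unsigned long bytes)
    {
    	int order = 0;
    	unsigned long pages = (bytes + 4095) / 4096;

    	while ((1UL << order) < pages)
    		order++;
    	return order;
    }

    int main(void)
    {
    	unsigned long pss = 19;               /* example ECAP.PSS value */
    	unsigned long pasid_max = 2UL << pss; /* 2^(1+PSS) entries */

    	printf("uncapped: %lu entries, order %d (%lu MiB)\n",
    	       pasid_max, order_for(pasid_max * 8), (pasid_max * 8) >> 20);
    	if (pasid_max > 0x20000)
    		pasid_max = 0x20000;
    	printf("capped:   %lu entries, order %d (%lu MiB)\n",
    	       pasid_max, order_for(pasid_max * 8), (pasid_max * 8) >> 20);
    	return 0;
    }

This prints order 11 (8MiB) uncapped versus order 8 (1MiB) capped; on a long-running, fragmented machine the former is the allocation that tends to fail.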
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -429,6 +429,7 @@ struct intel_iommu {
 	struct page_req_dsc *prq;
 	unsigned char prq_name[16];	/* Name for PRQ interrupt */
 	struct idr pasid_idr;
+	u32 pasid_max;
 #endif
 	struct q_inval *qi;		/* Queued invalidation info */
 	u32 *iommu_state;	/* Store iommu states between suspend and resume.*/