Commit 2ccd4502 authored by Linus Torvalds

Merge tag 'iommu-fixes-v6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - Intel VT-d fixes from Lu Baolu:
     - Boot kdump kernels with VT-d scalable mode on
     - Calculate the right page table levels
     - Fix two recursive locking issues
     - Fix a lockdep splat issue

 - AMD IOMMU fixes:
     - Fix for completion-wait command to use full 64 bits of data
     - Fix a PASID-related issue where GPU sound devices failed to
       initialize

 - Fix for Virtio-IOMMU to report correct caching behavior, needed for
   use with VFIO

* tag 'iommu-fixes-v6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu: Fix false ownership failure on AMD systems with PASID activated
  iommu/vt-d: Fix possible recursive locking in intel_iommu_init()
  iommu/virtio: Fix interaction with VFIO
  iommu/vt-d: Fix lockdep splat due to klist iteration in atomic context
  iommu/vt-d: Fix recursive lock issue in iommu_flush_dev_iotlb()
  iommu/vt-d: Correctly calculate sagaw value of IOMMU
  iommu/vt-d: Fix kdump kernels boot failure with scalable mode
  iommu/amd: use full 64-bit value in build_completion_wait()
parents 134984db 2380f1e8
@@ -939,7 +939,8 @@ static void build_completion_wait(struct iommu_cmd *cmd,
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
 	cmd->data[1] = upper_32_bits(paddr);
-	cmd->data[2] = data;
+	cmd->data[2] = lower_32_bits(data);
+	cmd->data[3] = upper_32_bits(data);
 	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 }
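To see why the old code lost information: cmd->data[] is an array of 32-bit words, so assigning a 64-bit completion-wait marker to data[2] alone silently truncated the upper half. A minimal userspace sketch (with stand-in macros for the kernel's lower_32_bits()/upper_32_bits(), not the kernel code itself):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel helpers of the same name. */
#define lower_32_bits(v) ((uint32_t)((v) & 0xffffffffULL))
#define upper_32_bits(v) ((uint32_t)((v) >> 32))

int main(void)
{
	uint64_t data = 0x1122334455667788ULL;	/* example wait marker */
	uint32_t cmd_data[4] = { 0 };

	/* Old behavior: only the low word was stored, dropping bits 63:32. */
	cmd_data[2] = lower_32_bits(data);
	/* Fixed behavior: the high word is stored in data[3] as well. */
	cmd_data[3] = upper_32_bits(data);

	uint64_t roundtrip = ((uint64_t)cmd_data[3] << 32) | cmd_data[2];
	printf("full value preserved: %s\n", roundtrip == data ? "yes" : "no");
	return 0;
}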
@@ -777,6 +777,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 	if (dev_state->domain == NULL)
 		goto out_free_states;
 
+	/* See iommu_is_default_domain() */
+	dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
 	amd_iommu_domain_direct_map(dev_state->domain);
 
 	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
@@ -2349,6 +2349,13 @@ static int dmar_device_hotplug(acpi_handle handle, bool insert)
 	if (!dmar_in_use())
 		return 0;
 
+	/*
+	 * It's unlikely that any I/O board is hot added before the IOMMU
+	 * subsystem is initialized.
+	 */
+	if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled)
+		return -EOPNOTSUPP;
+
 	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
 		tmp = handle;
 	} else {
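The guard above leans on IS_ENABLED(): the config macro collapses to a compile-time 0 or 1, so when CONFIG_INTEL_IOMMU is off the branch (and its reference to intel_iommu_enabled) is optimized away without an #ifdef block. A simplified stand-alone sketch of the pattern (the real kernel macro uses token pasting; this stand-in is hypothetical):

#include <stdio.h>

#define CONFIG_INTEL_IOMMU 1		/* pretend .config: option enabled */
#define IS_ENABLED(option) (option)	/* simplified stand-in macro */

static int intel_iommu_enabled;		/* set once IOMMU init completes */

/* Mirrors the hotplug guard: refuse hot-add before the subsystem is up. */
static int hotplug_allowed(void)
{
	if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled)
		return 0;
	return 1;
}

int main(void)
{
	printf("before init: %d\n", hotplug_allowed());
	intel_iommu_enabled = 1;
	printf("after init:  %d\n", hotplug_allowed());
	return 0;
}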
[One diff is collapsed in the original view and not reproduced here.]
@@ -197,7 +197,6 @@
 #define ecap_dis(e)		(((e) >> 27) & 0x1)
 #define ecap_nest(e)		(((e) >> 26) & 0x1)
 #define ecap_mts(e)		(((e) >> 25) & 0x1)
-#define ecap_ecs(e)		(((e) >> 24) & 0x1)
 #define ecap_iotlb_offset(e)	((((e) >> 8) & 0x3ff) * 16)
 #define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
 #define ecap_coherent(e)	((e) & 0x1)
@@ -265,7 +264,6 @@
 #define DMA_GSTS_CFIS (((u32)1) << 23)
 
 /* DMA_RTADDR_REG */
-#define DMA_RTADDR_RTT (((u64)1) << 11)
 #define DMA_RTADDR_SMT (((u64)1) << 10)
 
 /* CCMD_REG */
@@ -579,6 +577,7 @@ struct intel_iommu {
 #ifdef CONFIG_INTEL_IOMMU
 	unsigned long *domain_ids;	/* bitmap of domains */
+	unsigned long *copied_tables;	/* bitmap of copied tables */
 	spinlock_t lock;		/* protect context, domain ids */
 	struct root_entry *root_entry;	/* virtual address */
@@ -701,6 +700,11 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte)
 		(struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
 }
 
+static inline bool context_present(struct context_entry *context)
+{
+	return (context->lo & 1);
+}
+
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 
 extern int dmar_enable_qi(struct intel_iommu *iommu);
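For background (a hypothetical sketch, not code from this pull): a VT-d context table holds 256 entries indexed by devfn, and bit 0 of an entry's low word is its present bit, which is exactly what the now-inline context_present() tests. A caller built on the inline above might look like:

/* Hypothetical helper: count how many of the 256 devfn slots in one
 * context table are marked present (bit 0 of the low word set). */
static int count_present_contexts(struct context_entry *table)
{
	int devfn, present = 0;

	for (devfn = 0; devfn < 256; devfn++)
		if (context_present(&table[devfn]))
			present++;

	return present;
}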
@@ -784,7 +788,6 @@ static inline void intel_iommu_debugfs_init(void) {}
 #endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
 
 extern const struct attribute_group *intel_iommu_groups[];
-bool context_present(struct context_entry *context);
 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
					  u8 devfn, int alloc);
@@ -3076,6 +3076,24 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
 	return ret;
 }
 
+static bool iommu_is_default_domain(struct iommu_group *group)
+{
+	if (group->domain == group->default_domain)
+		return true;
+
+	/*
+	 * If the default domain was set to identity and it is still an
+	 * identity domain then we consider this a pass. This happens because
+	 * of amd_iommu_init_device() replacing the default identity domain
+	 * with an identity domain that has a different configuration for
+	 * AMDGPU.
+	 */
+	if (group->default_domain &&
+	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
+	    group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
+		return true;
+
+	return false;
+}
+
 /**
  * iommu_device_use_default_domain() - Device driver wants to handle device
  *				       DMA through the kernel DMA API.
@@ -3094,8 +3112,7 @@ int iommu_device_use_default_domain(struct device *dev)
 
 	mutex_lock(&group->mutex);
 	if (group->owner_cnt) {
-		if (group->domain != group->default_domain ||
-		    group->owner) {
+		if (group->owner || !iommu_is_default_domain(group)) {
 			ret = -EBUSY;
 			goto unlock_out;
 		}
@@ -1006,7 +1006,18 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	return iommu_fwspec_add_ids(dev, args->args, 1);
 }
 
+static bool viommu_capable(enum iommu_cap cap)
+{
+	switch (cap) {
+	case IOMMU_CAP_CACHE_COHERENCY:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static struct iommu_ops viommu_ops = {
+	.capable		= viommu_capable,
 	.domain_alloc		= viommu_domain_alloc,
 	.probe_device		= viommu_probe_device,
 	.probe_finalize		= viommu_probe_finalize,
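For context on why VFIO cares (a hedged sketch assuming the v6.0-era iommu_capable(bus, cap) signature; the real VFIO call sites are more involved): a consumer checks the capability through the core API, and with the viommu_capable() callback wired up above, virtio-iommu now answers true:

/* Hypothetical VFIO-like check: refuse assignment when the IOMMU
 * cannot enforce cache-coherent DMA for the device. */
static int vfio_like_coherency_check(struct device *dev)
{
	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY))
		return -EINVAL;
	return 0;
}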
@@ -65,6 +65,7 @@ struct dmar_pci_notify_info {
 
 extern struct rw_semaphore dmar_global_lock;
 extern struct list_head dmar_drhd_units;
+extern int intel_iommu_enabled;
 
 #define for_each_drhd_unit(drhd) \
 	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
@@ -88,7 +89,8 @@ extern struct list_head dmar_drhd_units;
 static inline bool dmar_rcu_check(void)
 {
 	return rwsem_is_locked(&dmar_global_lock) ||
-	       system_state == SYSTEM_BOOTING;
+	       system_state == SYSTEM_BOOTING ||
+	       (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled);
 }
 
 #define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
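As background on the pattern (a minimal hypothetical sketch, not from this pull): rcu_dereference_check(p, c) behaves like rcu_dereference(p) but additionally treats condition c as proof that the access is safe, so lockdep stays quiet whenever c holds. That is how the added !intel_iommu_enabled clause suppresses the false-positive splat during early boot:

/* Hypothetical RCU-protected list node plus a setup flag. */
struct foo {
	struct foo __rcu *next;
};

static bool foo_setup_done;

/*
 * Like dmar_rcu_dereference(): before setup completes, single-threaded
 * init code may dereference without rcu_read_lock() and lockdep accepts
 * it; once foo_setup_done is set, normal RCU rules apply again.
 */
#define foo_rcu_deref(p) rcu_dereference_check((p), !foo_setup_done)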