Commit 8ef24c20 authored by Linus Torvalds

Merge tag 'iommu-fixes-v5.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - AMD IOMMU fix to make sure features are detected before they are
   queried.

 - Intel IOMMU address alignment check fix for an IOTLB flushing
   command.

 - Performance fix for Intel IOMMU to make sure the code does not do
   full IOTLB flushes all the time. Those flushes are very expensive
   on emulated IOMMUs.

* tag 'iommu-fixes-v5.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/vt-d: Do not use flush-queue when caching-mode is on
  iommu/vt-d: Correctly check addr alignment in qi_flush_dev_iotlb_pasid()
  iommu/amd: Use IVHD EFR for early initialization of IOMMU features
parents 32b0c410 29b32839
@@ -84,12 +84,9 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
 		(pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
 }
 
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
 {
-	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
-		return false;
-
-	return !!(iommu->features & f);
+	return !!(iommu->features & mask);
 }
 
 static inline u64 iommu_virt_to_phys(void *vaddr)
...
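Dropping the IOMMU_CAP_EFR check here is safe because iommu->features is now populated before any caller queries it (from IVHD early, or from MMIO in late_iommu_features_init() below) and simply stays 0 on hardware without EFR, so every feature query still returns false. A standalone model of that reasoning (the struct and the FEATURE_GT bit position are illustrative stand-ins, not the kernel's definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURE_GT	(1ULL << 4)	/* illustrative bit position */

struct fake_iommu {
	uint64_t features;	/* 0 unless EFR was read from IVHD or MMIO */
};

/* Same shape as the simplified kernel helper: a pure mask test. */
static bool iommu_feature(struct fake_iommu *iommu, uint64_t mask)
{
	return !!(iommu->features & mask);
}

int main(void)
{
	struct fake_iommu no_efr  = { .features = 0 };
	struct fake_iommu with_gt = { .features = FEATURE_GT };

	printf("no EFR:  GT=%d\n", iommu_feature(&no_efr, FEATURE_GT));	/* 0 */
	printf("has GT:  GT=%d\n", iommu_feature(&with_gt, FEATURE_GT));	/* 1 */
	return 0;
}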
@@ -387,6 +387,10 @@
 #define IOMMU_CAP_NPCACHE	26
 #define IOMMU_CAP_EFR		27
 
+/* IOMMU IVINFO */
+#define IOMMU_IVINFO_OFFSET	36
+#define IOMMU_IVINFO_EFRSUP	BIT(0)
+
 /* IOMMU Feature Reporting Field (for IVHD type 10h) */
 #define IOMMU_FEAT_GASUP_SHIFT	6
...
@@ -257,6 +257,8 @@ static void init_device_table_dma(void);
 
 static bool amd_iommu_pre_enabled = true;
 
+static u32 amd_iommu_ivinfo __initdata;
+
 bool translation_pre_enabled(struct amd_iommu *iommu)
 {
 	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
@@ -296,6 +298,18 @@ int amd_iommu_get_num_iommus(void)
 	return amd_iommus_present;
 }
 
+/*
+ * For IVHD type 0x11/0x40, EFR is also available via IVHD.
+ * Default to IVHD EFR since it is available sooner
+ * (i.e. before PCI init).
+ */
+static void __init early_iommu_features_init(struct amd_iommu *iommu,
+					     struct ivhd_header *h)
+{
+	if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
+		iommu->features = h->efr_reg;
+}
+
 /* Access to l1 and l2 indexed register spaces */
 
 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
@@ -1577,6 +1591,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 
 		if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
 			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+
+		early_iommu_features_init(iommu, h);
+
 		break;
 	default:
 		return -EINVAL;
@@ -1770,6 +1787,35 @@ static const struct attribute_group *amd_iommu_groups[] = {
 	NULL,
 };
 
+/*
+ * Note: IVHD 0x11 and 0x40 also contain an exact copy
+ * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
+ * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
+ */
+static void __init late_iommu_features_init(struct amd_iommu *iommu)
+{
+	u64 features;
+
+	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+		return;
+
+	/* read extended feature bits */
+	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+
+	if (!iommu->features) {
+		iommu->features = features;
+		return;
+	}
+
+	/*
+	 * Sanity check and warn if EFR values from
+	 * IVHD and MMIO conflict.
+	 */
+	if (features != iommu->features)
+		pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
+			features, iommu->features);
+}
+
 static int __init iommu_init_pci(struct amd_iommu *iommu)
 {
 	int cap_ptr = iommu->cap_ptr;
@@ -1789,8 +1835,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
 		amd_iommu_iotlb_sup = false;
 
-	/* read extended feature bits */
-	iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+	late_iommu_features_init(iommu);
 
 	if (iommu_feature(iommu, FEATURE_GT)) {
 		int glxval;
@@ -2607,6 +2652,11 @@ static void __init free_dma_resources(void)
 	free_unity_maps();
 }
 
+static void __init ivinfo_init(void *ivrs)
+{
+	amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
+}
+
 /*
  * This is the hardware init function for AMD IOMMU in the system.
  * This function is called either from amd_iommu_init or from the interrupt
@@ -2661,6 +2711,8 @@ static int __init early_amd_iommu_init(void)
 	if (ret)
 		goto out;
 
+	ivinfo_init(ivrs_base);
+
 	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
 	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
...
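ivinfo_init() reads the IVinfo dword at byte offset 36 of the ACPI IVRS table; bit 0 (EFRSup) tells the driver whether the per-IVHD EFR copy is valid. The same field can be inspected from user space via the raw table Linux exposes under /sys/firmware/acpi/tables/IVRS (usually root-only). A hedged sketch, assuming that sysfs path and mirroring the defines above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IVINFO_OFFSET	36		/* mirrors IOMMU_IVINFO_OFFSET */
#define IVINFO_EFRSUP	(1u << 0)	/* mirrors IOMMU_IVINFO_EFRSUP */

int main(void)
{
	uint8_t buf[IVINFO_OFFSET + 4];
	uint32_t ivinfo;
	FILE *f = fopen("/sys/firmware/acpi/tables/IVRS", "rb");

	/* Read just the table header plus the IVinfo field. */
	if (!f || fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
		perror("IVRS");
		return 1;
	}
	fclose(f);

	memcpy(&ivinfo, buf + IVINFO_OFFSET, sizeof(ivinfo));
	printf("IVinfo=%#x EFRSup=%s\n", ivinfo,
	       (ivinfo & IVINFO_EFRSUP) ? "yes" : "no");
	return 0;
}

This is the same value the kernel caches in amd_iommu_ivinfo, just read from the firmware table file instead of the mapped IVRS base.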
@@ -1496,7 +1496,7 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 	 * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
 	 * ECAP.
 	 */
-	if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
+	if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
 		pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
 				    addr, size_order);
...
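The old expression was off by one: an invalidation of order size_order covers VTD_PAGE_SIZE << size_order bytes, so only the low size_order + 12 address bits must be zero, but GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0) masks bits 0 through size_order + 12 inclusive, one bit too many, so correctly aligned addresses tripped the warning. A standalone check of the arithmetic (VTD_PAGE_SHIFT is 12 in the kernel; GENMASK_ULL and IS_ALIGNED are re-derived here for user space):

#include <assert.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT	12
#define VTD_PAGE_SIZE	(1ULL << VTD_PAGE_SHIFT)
#define GENMASK_ULL(h, l) \
	(((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	uint64_t addr = 0x2000;	/* 8 KiB aligned */
	int size_order = 1;	/* invalidate 2 pages = 8 KiB */

	/* Old check: masks bits 0..13 inclusive, so it demands 16 KiB
	 * alignment and falsely flags this valid address. */
	assert((addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0)) != 0);

	/* New check: 8 KiB alignment is all that is required. */
	assert(IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order));
	return 0;
}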
@@ -5440,6 +5440,36 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
 	return ret;
 }
 
+static bool domain_use_flush_queue(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	bool r = true;
+
+	if (intel_iommu_strict)
+		return false;
+
+	/*
+	 * The flush queue implementation does not perform page-selective
+	 * invalidations that are required for efficient TLB flushes in
+	 * virtual environments. The benefit of batching is likely to be
+	 * much lower than the overhead of synchronizing the virtual and
+	 * physical IOMMU page-tables.
+	 */
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		if (!cap_caching_mode(iommu->cap))
+			continue;
+
+		pr_warn_once("IOMMU batching is disabled due to virtualization");
+		r = false;
+		break;
+	}
+	rcu_read_unlock();
+
+	return r;
+}
+
 static int
 intel_iommu_domain_get_attr(struct iommu_domain *domain,
 			    enum iommu_attr attr, void *data)
@@ -5450,7 +5480,7 @@ intel_iommu_domain_get_attr(struct iommu_domain *domain,
 	case IOMMU_DOMAIN_DMA:
 		switch (attr) {
 		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			*(int *)data = !intel_iommu_strict;
+			*(int *)data = domain_use_flush_queue();
 			return 0;
 		default:
 			return -ENODEV;
...
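The cap_caching_mode() test keys off the Caching Mode (CM) bit of the VT-d capability register: real hardware reports CM=0, while emulated IOMMUs (e.g. QEMU's vIOMMU) set CM=1 because the guest must issue invalidations even for not-present-to-present mapping changes, which is exactly the environment where batched flush queues hurt. A small standalone illustration (the register value below is invented; the bit-7 position follows the VT-d specification):

#include <stdint.h>
#include <stdio.h>

/* CM is bit 7 of the VT-d capability register. */
#define cap_caching_mode(c)	(((c) >> 7) & 1ULL)

int main(void)
{
	uint64_t bare_metal_cap = 0x00d2008c40660462ULL;	/* CM = 0 (made up) */
	uint64_t emulated_cap   = bare_metal_cap | (1ULL << 7);

	printf("bare metal: CM=%llu -> flush queue ok\n",
	       (unsigned long long)cap_caching_mode(bare_metal_cap));
	printf("emulated:   CM=%llu -> fall back to strict invalidation\n",
	       (unsigned long long)cap_caching_mode(emulated_cap));
	return 0;
}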